(), "Gestão de processos, indicadores analíticos e impactos sobre o desempenho competitivo em grandes e médias empresas brasileiras dos setores da indústria e de servic#cos#"
BibTeX:
@article{ladeira-gestao:2012,
  title = {Gestão de processos, indicadores analíticos e impactos sobre o desempenho competitivo em grandes e médias empresas brasileiras dos setores da indústria e de serviços}
}
Huang K and Mi J (2018), "A new non-parametric estimator for instant system availability", Computational Statistics & Data Analysis., February, 2018. Vol. 118, pp. 18-29.
Abstract: Instant availability of a repairable system is a very important measure of its performance. Among the extensive literature in system availability of the steady state, which is the limit of instant availability as time approaches infinity, many methods and approaches have been explored. However, less has been done on instant system availability owing to its theoretical and computational challenges. A new non-parametric estimator of instant availability is proposed. This estimator is both asymptotically consistent and efficient in numerical computation. Multiple numerical simulations are presented to demonstrate the performance of the new estimator.
BibTeX:
@article{huang-new:2018,
  author = {Huang, Kai and Mi, Jie},
  title = {A new non-parametric estimator for instant system availability},
  journal = {Computational Statistics & Data Analysis},
  year = {2018},
  volume = {118},
  pages = {18--29},
  url = {http://www.sciencedirect.com/science/article/pii/S0167947317301949},
  doi = {10.1016/j.csda.2017.09.001}
}
Mostafavi M and Kabiri P (2018), "Detection of repetitive and irregular hypercall attacks from guest virtual machines to Xen hypervisor", Iran Journal of Computer Science., January, 2018. , pp. 1-9.
Abstract: Virtualization is critical to the infrastructure of cloud computing environment and other online services. Hypercall interface is provided by hypervisor to offer privileged requests by the guest domains. Attackers may use this interface to send malicious hypercalls. In the reported work, repetitive hypercall attacks and sending hypercalls within irregular sequences to Xen hypervisor were analyzed, and finally, an intrusion detection system (IDS) is proposed to detect these attacks. The proposed system is placed in the host domain (Dom0). Monitoring hypercalls traffic the system operates based on the identification of irregular behaviors in hypercalls sent from guest domains to hypervisor. Later on, the association rule algorithm is applied on the collected data within a fixed time window, and a set of thresholds for maximum number of all types of the hypercalls is extracted. The results from the implementation of the proposed system show 91% true positive rate.
BibTeX:
@article{mostafavi-detection:2018,
  author = {Mostafavi, Mojtaba and Kabiri, Peyman},
  title = {Detection of repetitive and irregular hypercall attacks from guest virtual machines to Xen hypervisor},
  journal = {Iran Journal of Computer Science},
  year = {2018},
  pages = {1--9},
  url = {https://link.springer.com/article/10.1007/s42044-018-0006-5},
  doi = {10.1007/s42044-018-0006-5}
}
Scarpiniti M, Baccarelli E, Naranjo PGV and Uncini A (2018), "Energy performance of heuristics and meta-heuristics for real-time joint resource scaling and consolidation in virtualized networked data centers", The Journal of Supercomputing., January, 2018. , pp. 1-38.
Abstract: In this paper, we explore on a comparative basis the performance suitability of meta-heuristic, sometime denoted as random search algorithms, and greedy-type heuristics for the energy-saving joint dynamic scaling and consolidation of the network-plus-computing resources hosted by networked virtualized data centers when the target is the support of real-time streaming-type applications. For this purpose, the energy and delay performances of Tabu Search (TS), Simulated Annealing (SA) and Evolutionary Strategy (ES) meta-heuristics are tested and compared with the corresponding ones of Best-Fit Decreasing-type heuristics, in order to give insight on the resulting performance-versus-implementation complexity trade-offs. In principle, the considered meta-heuristics and heuristics are general formal approaches that can be applied to large classes of (typically, non-convex and mixed integer) optimization problems. However, specially for the meta-heuristics, a main challenge is to design them to properly address the real-time joint computing-plus-networking resource consolidation and scaling optimization problem. To this purpose, the aim of this paper is: (i) introduce a novel Virtual Machine Allocation (VMA) scheme that aims at choosing a suitable set of possible Virtual Machine placements among the (possibly, non-homogeneous) set of available servers; (ii) propose a new class of random search algorithms (RSAs) denoted as consolidation meta-heuristic, considering the VMA problem in RSAs. In particular, the design of novel variants of meta-heuristics, namely TS-RSC, SA-RSC and ES-RSC, is particularized to the resource scaling and consolidation (RSC) problem; (iii) compare the results of the obtained new RSAs class against some state-of-the-art heuristic approaches. A set of experimental results, both simulated and real-world ones, support the effectiveness of the proposed approaches against the traditional ones.
BibTeX:
@article{scarpiniti-energy:2018,
  author = {Scarpiniti, Michele and Baccarelli, Enzo and Naranjo, Paola G. Vinueza and Uncini, Aurelio},
  title = {Energy performance of heuristics and meta-heuristics for real-time joint resource scaling and consolidation in virtualized networked data centers},
  journal = {The Journal of Supercomputing},
  year = {2018},
  pages = {1--38},
  url = {https://link.springer.com/article/10.1007/s11227-018-2244-6},
  doi = {10.1007/s11227-018-2244-6}
}
a (2018), "a".
BibTeX:
@misc{a:2018,
  author = {a},
  title = {a},
  year = {2018},
  url = {a}
}
Amazon (2018), "Amazon Web Services".
BibTeX:
@misc{aws:2018,
  author = {Amazon},
  title = {Amazon Web Services},
  year = {2018},
  url = {https://aws.amazon.com}
}
Microsoft (2018), "Microsoft Azure".
BibTeX:
@misc{azure:2018,
  author = {Microsoft},
  title = {Microsoft Azure},
  year = {2018},
  url = {https://azure.microsoft.com}
}
CENELEC (2018), "The Importance of Standards - CEN-CENELEC".
BibTeX:
@misc{cenelec-importance:2018,
  author = {CENELEC},
  title = {The Importance of Standards - CEN-CENELEC},
  year = {2018},
  url = {https://www.cencenelec.eu/research/tools/ImportanceENs/Pages/default.aspx}
}
Google (2018), "Google Cloud Platform".
BibTeX:
@misc{google:2018,
  author = {Google},
  title = {Google Cloud Platform},
  year = {2018},
  url = {https://cloud.google.com/}
}
Heroku (2018), "Heroku".
BibTeX:
@misc{heroku:2018,
  author = {Heroku},
  title = {Heroku},
  year = {2018},
  url = {https://www.heroku.com}
}
IBM (2018), "IBM Cloud".
BibTeX:
@misc{ibm:2018,
  author = {IBM},
  title = {IBM Cloud},
  year = {2018},
  url = {https://www.ibm.com/cloud/}
}
ISO (2018), "ISO/IEC JTC 1/SC 27 - IT Security techniques".
BibTeX:
@misc{iso-iec-jtc:2018,
  author = {ISO},
  title = {ISO/IEC JTC 1/SC 27 - IT Security techniques},
  organization = {ISO},
  year = {2018},
  url = {https://www.iso.org/committee/45306.html}
}
Uptime Institute (2018), "Tier Certification System".
Abstract: Tiers is the global language of data center performance Tier Classification System Uptime Institute created the standard Tier Classification System as a me...
BibTeX:
@misc{uptime-institute-tier:2018,
  author = {Uptime Institute},
  title = {Tier Certification System},
  howpublished = {Uptime Institute: Tier Certification},
  year = {2018},
  url = {https://uptimeinstitute.com/tiers}
}
Camargo DS, Miers CC, Pillon MA and Koslovski GP (2017), "MeHarCEn: Um Método de Harmonização do Consumo de Energia em Data Centers", Revista de Informática Teórica e Aplicada., December, 2017. Vol. 24(2), pp. 47-70.
Abstract: Controlled and efficient energy consumption is a challenge faced daily by managers of small, medium and large Data Centers. The specialized and technical literature defines several guides, equipment and mechanisms for this purpose. However, the combined application of these solutions is a complex task that, in many cases, requires a high financial investment. In this context, this work proposes MeHarCEn, an initiative for the combined and harmonized management of energy consumption in Data Centers. MeHarCEn does not depend on proprietary solutions and can be adapted to Data Centers with different configurations. Applied to a case study of a small-sized DC, MeHarCEn yields savings of 52.7% in cooling energy consumption.
BibTeX:
@article{camargo-meharcen:2017,
  author = {Camargo, Daniel Scheidemantel and Miers, Charles Christian and Pillon, Maurício Aronne and Koslovski, Guilherme Piêgas},
  title = {MeHarCEn: Um Método de Harmonização do Consumo de Energia em Data Centers},
  journal = {Revista de Informática Teórica e Aplicada},
  year = {2017},
  volume = {24},
  number = {2},
  pages = {47--70},
  url = {http://seer.ufrgs.br/index.php/rita/article/view/VOL24-NR2-47},
  doi = {10.22456/2175-2745.76460}
}
Thorat P, Raza SM, Kim DS and Choo H (2017), "Rapid recovery from link failures in software-defined networks", Journal of Communications and Networks., December, 2017. Vol. 19(6), pp. 648-665.
Abstract: Carrier-grade networks (CGNs) can leverage the network programmability of software-defined networking (SDN) to ensure fast recovery and high availability. However, for the successful adoption of SDN, the failure recovery requirement must be addressed. Local detouring is a popular approach for faster recovery rather than path-based end-to-end recovery. For fast local recovery, alternate paths must be preinstalled for each individual flow on the link, which in some cases results in storing thousands of alternate path flow rules. Furthermore, the dependence on the controller for dynamic per-flow detouring may delay the recovery. In this paper, we propose local immediate (LIm) and immediate controller dependent (ICoD) recovery mechanisms to address the limitations of OpenFlow-based link recovery approaches. Our proposed mechanisms considerably reduce the alternate path flow rules by aggregating the disrupted flows using virtual local area network (VLAN) tagging. The proposed algorithms achieve recovery within 3 ms and 20 ms, respectively and satisfy the strict 50 ms recovery requirement of CGNs. LIm and ICoD also reduce the alternate path flow storage requirement by up to 99%. Simulation results reveal that the flow-aggregation also reduces the effort of the controller and minimizes the alternate path installation traffic.
BibTeX:
@article{thorat-rapid:2017,
  author = {Thorat, P. and Raza, S. M. and Kim, D. S. and Choo, H.},
  title = {Rapid recovery from link failures in software-defined networks},
  journal = {Journal of Communications and Networks},
  year = {2017},
  volume = {19},
  number = {6},
  pages = {648--665},
  doi = {10.1109/JCN.2017.000105}
}
Ruck D, Miers C, Pillon MA and Koslovski G (2017), "EAVIRA: Energy-Aware Virtual Infrastructure Reallocation Algorithm", In Proceedings SBESC 2017. Curitiba/PR/Brazil, November, 2017. , pp. 14.
Abstract: The elastic provisioning of virtual infrastructures enables a dynamic management of cloud resources. Elasticity enables the setup of virtualized resources (computing and communication) in order to meet the hosted application's requirements. Thus, to perform elasticity requests, providers usually rely on reallocation mechanisms and policies. The concerns regarding the environment and the operational costs indicates energy consumption of the data centers as recurring topic in providers policies. Moreover, energy-aware provisioning is beneficial for tenants also. Recent cost models have introduced an implicit incentive to computing and networking resources usage just when requested to avoid high rent costs. In this paper we propose EAVIRA algorithm, which takes into account the proportional sharing of CPU usage of data center servers to calculate individual usage costs to: disable idle equipments, and reallocate virtual infrastructures. EAVIRA acts on online requests for elasticity configuration and performs an offline load balancing, triggered by the IaaS provider. Our experimental analysis indicates a reduction of energy consumption and an increasing on acceptance ratio of allocation requests.
BibTeX:
@inproceedings{artigo-denivy:2017,
  author = {Ruck, Denivy and Miers, Charles and Pillon, Mauricio Aronne and Koslovski, Guilherme},
  title = {EAVIRA: Energy-Aware Virtual Infrastructure Reallocation Algorithm},
  booktitle = {Proceedings SBESC 2017},
  year = {2017},
  pages = {14}
}
Beierl M (2017), "High Availability For OPNFV". October, 2017.
Abstract: This project is focused on the high availability requirements of the OPNFV platform, with regards to the Carrier Grade NFV scenarios. In this project, we address HA requirements and solutions in 3 different perspectives; the hardware HA, the virtual infrastructure HA and the service HA, to be specific. Requirement and API definition of high availability of OPNFV will be output from this project.
BibTeX:
@misc{beierl-high:2017,
  author = {Beierl, Mark},
  title = {High Availability For OPNFV},
  howpublished = {Opnfv.org},
  year = {2017},
  url = {https://wiki.opnfv.org/display/availability/High+Availability+For+OPNFV}
}
Keshavarzi A, Haghighat AT and Bohlouli M (2017), "Adaptive Resource Management and Provisioning in the Cloud Computing: A Survey of Definitions, Standards and Research Roadmaps", KSII Transactions on Internet and Information Systems., September, 2017. Vol. 11(9), pp. 4280-4300.
BibTeX:
@article{keshavarzi-adaptive:2017,
  author = {Keshavarzi, Amin and Haghighat, Abolfazl Toroghi and Bohlouli, Mahdi},
  title = {Adaptive Resource Management and Provisioning in the Cloud Computing: A Survey of Definitions, Standards and Research Roadmaps},
  journal = {KSII Transactions on Internet and Information Systems},
  year = {2017},
  volume = {11},
  number = {9},
  pages = {4280--4300},
  url = {http://itiis.org/digital-library/manuscript/1788}
}
Nguyen TA, Rui X, Lim D, Oh J, Min D, Choi E, Thang TD and Son NN (2017), "Model-Based Sensitivity of a Disaster Tolerant Active-Active GENESIS Cloud System", In Industrial Networks and Intelligent Systems., September, 2017. , pp. 228-241. Springer, Cham.
Abstract: Modern cloud computing systems are prone to disasters. And the true cost due to service outages is reportedly huge. Some of previous works presented the use of hierarchical models: fault tree (FT), reliability block diagram (RBD) along with state-space models: continuous time Markov chain (CTMC) or stochastic petri nets (SPN) to assess the reliability/availability of cloud systems, but with much simplification. In this paper, we attempt to propose a combinatorial monolithic model using reliability graph (RG) for a real-world cloud system called general purpose integrated cloud system (GENESIS). The system is designed in active-active high availability configuration with two geographically distributed cloud sites for the sake of disaster tolerance (DT). We then present the model-based comprehensive analysis of system reliability/availability and their sensitivity. The results pinpoint different findings in which the architecture of active-active and geographically dispersed sites with appropriate interconnections of the cloud apparently enhance the system reliability/availability and assure disaster tolerance for the cloud.
BibTeX:
@inproceedings{nguyen-model-based:2017,
  author = {Nguyen, Tuan Anh and Rui, Xuhua and Lim, Damsub and Oh, Jun and Min, Dugki and Choi, Eunmi and Thang, Tran Duc and Son, Nguyen Nhu},
  title = {Model-Based Sensitivity of a Disaster Tolerant Active-Active GENESIS Cloud System},
  booktitle = {Industrial Networks and Intelligent Systems},
  publisher = {Springer, Cham},
  year = {2017},
  pages = {228--241},
  url = {https://link.springer.com/chapter/10.1007/978-3-319-74176-5_20},
  doi = {10.1007/978-3-319-74176-5_20}
}
Bhowmik S (2017), "Cloud Computing", July, 2017. Cambridge University Press.
Abstract: Written in a tutorial style, this comprehensive guide follows a structured approach explaining cloud techniques, models and platforms. Popular cloud services such as Amazon, Google and Microsoft Azure are explained in the text. The security risks and challenges of cloud computing are discussed in detail with useful examples. Emerging trends including mobile cloud computing and internet of things are discussed in the book for the benefit of the readers. Numerous review questions, multiple choice exercises and case studies facilitate enhanced understanding. This textbook is ideal for undergraduate and graduate students of computer science engineering, and information technology.
BibTeX:
@book{bhowmik-cloud:2017,
  author = {Bhowmik, Sandeep},
  title = {Cloud Computing},
  publisher = {Cambridge University Press},
  year = {2017}
}
Burns S (2017), "High availability in cloud computing prevents a SPOF", TechTarget: SearchITOperations (07/2017)., July, 2017.
Abstract: Design high availability in cloud computing to preserve application stability during server maintenance work and outages. It's about services, not servers.
BibTeX:
@article{burns-high:2017,
  author = {Burns, Stuart},
  title = {High availability in cloud computing prevents a SPOF},
  journal = {TechTarget: SearchITOperations (07/2017)},
  year = {2017},
  url = {http://searchitoperations.techtarget.com/tip/High-availability-in-cloud-computing-prevents-a-SPOF}
}
Zhang Q, Liu S, Qin S and Shi Y (2017), "A column generation-based algorithm for two-stage, two-dimensional bin packing problem with a variant variable sized constraint", In 2017 36th Chinese Control Conference (CCC)., July, 2017. , pp. 2841-2845.
Abstract: In this paper, we consider two-dimensional bin packing problem with a variant variable sized constraint, where the width of the bin is continuously changed and the height of the bin is discretely changed, with the further restrictions that packing can be rotated and done in two stages. A column generation-based algorithm is proposed when the first cutting stage is the vertical direction. The problem is decomposed into a level packing problem and a level combining problem. Afterwards, a column generation (CG) algorithm and a cutting rule (CR) scheme are proposed respectively. Computational results for the practical instances show the effectiveness of the proposed method. Using the column generation-based algorithm above mentioned we solved the instances found at the OR-LIBRARY.
BibTeX:
@inproceedings{zhang-column:2017,
  author = {Zhang, Q. and Liu, S. and Qin, S. and Shi, Y.},
  title = {A column generation-based algorithm for two-stage, two-dimensional bin packing problem with a variant variable sized constraint},
  booktitle = {2017 36th Chinese Control Conference (CCC)},
  year = {2017},
  pages = {2841--2845},
  doi = {10.23919/ChiCC.2017.8027796}
}
Zhang L, Wu Y, Xue R, Hsu TC, Yang H and Chung YC (2017), "HybridFS - A High Performance and Balanced File System Framework with Multiple Distributed File Systems", In 2017 IEEE 41st Annual Computer Software and Applications Conference (COMPSAC)., July, 2017. Vol. 01, pp. 796-805.
Abstract: In the big data era, the distributed file system is getting more and more significant due to the characteristics of its scale-out capability, high availability, and high performance. Different distributed file systems may have different design goals. For example, some of them are designed to have good performance for small file operations, such as GlusterFS, while some of them are designed for large file operations, such as Hadoop distributed file system. With the divergence of big data applications, a distributed file system may provide good performance for some applications but fails for some other applications, that is, there has no universal distributed file system that can produce good performance for all applications. In this paper, we propose a hybrid file system framework, HybridFS, which can deliver satisfactory performance for all applications. HybridFS is composed of multiple distributed file systems with the integration of advantages of these distributed file systems. In HybridFS, on top of multiple distributed file systems, we have designed a metadata management server to perform three functions: file placement, partial metadata store, and dynamic file migration. The file placement is performed based on a decision tree. The partial metadata store is performed for files whose size is less than a few hundred Bytes to increase throughput. The dynamic file migration is performed to balance the storage usage of distributed file systems without throttling performance. We have implemented HybridFS in java on eight nodes and choose Ceph, HDFS, and GlusterFS as designated distributed file systems. The experimental results show that, in the best case, HybridFS can have up to 30% performance improvement of read/write operations over a single distributed file system. In addition, if the difference of storage usage among multiple distributed file systems is less than 40 the performance of HybridFS is guaranteed, that is, no performance degradation.
BibTeX:
@inproceedings{zhang-hybridfs:2017,
  author = {Zhang, L. and Wu, Y. and Xue, R. and Hsu, T. C. and Yang, H. and Chung, Y. C.},
  title = {HybridFS - A High Performance and Balanced File System Framework with Multiple Distributed File Systems},
  booktitle = {2017 IEEE 41st Annual Computer Software and Applications Conference (COMPSAC)},
  year = {2017},
  volume = {01},
  pages = {796--805},
  doi = {10.1109/COMPSAC.2017.140}
}
Zhou A, Sun Q and Li J (2017), "Enhancing reliability via checkpointing in cloud computing systems", China Communications., July, 2017. Vol. 14(7), pp. 1-10.
Abstract: Cloud computing is becoming an important solution for providing scalable computing resources via Internet. Because there are tens of thousands of nodes in data center, the probability of server failures is nontrivial. Therefore, it is a critical challenge to guarantee the service reliability. Fault-tolerance strategies, such as checkpoint, are commonly employed. Because of the failure of the edge switches, the checkpoint image may become inaccessible. Therefore, current checkpoint-based fault tolerance method cannot achieve the best effect. In this paper, we propose an optimal checkpoint method with edge switch failure-aware. The edge switch failure-aware checkpoint method includes two algorithms. The first algorithm employs the data center topology and communication characteristic for checkpoint image storage server selection. The second algorithm employs the checkpoint image storage characteristic as well as the data center topology to select the recovery server. Simulation experiments are performed to demonstrate the effectiveness of the proposed method.
BibTeX:
@article{zhou-enhancing:2017,
  author = {Zhou, A. and Sun, Q. and Li, J.},
  title = {Enhancing reliability via checkpointing in cloud computing systems},
  journal = {China Communications},
  year = {2017},
  volume = {14},
  number = {7},
  pages = {1--10},
  doi = {10.1109/CC.2017.8010962}
}
Islam T and Manivannan D (2017), "Predicting Application Failure in Cloud: A Machine Learning Approach", In 2017 IEEE International Conference on Cognitive Computing (ICCC)., June, 2017. , pp. 24-31.
Abstract: Despite employing the architectures designed for high service reliability and availability, cloud computing systems do experience service outages and performance slowdown. In addition to these, large-scale cloud systems experience failures in their hardware and software components which often result in node and application (e.g., jobs and tasks) failures. Therefore, to build a reliable cloud system, it is important to understand and characterize the observed failures. The goal of this work is to identify the key features that correlate to application failures in cloud and present a failure prediction model that can correctly predict the outcome of a task or job before it actually finishes, fails or gets killed. To accomplish this, we perform a failure characterization study of the Google cluster workload trace. Our analysis reveals that, there is a significant consumption of resources due to failed and killed jobs. We further explore the potential for failure prediction in cloud applications so that we can reduce the wastage of resources by better managing the jobs and tasks that ultimately fail or get killed. For this, we propose a prediction method based on a special type of Recurrent Neural Network (RNN) named Long Short-Term Memory Network (LSTM) to identify application failures in cloud. It takes resource usage measurements or performance data for each job and task, and the goal is to predict the termination status (e.g., failed and finished etc.) of them. Our algorithm can predict task failures with 87% accuracy and achieves a true positive rate of 85% and false positive rate of 11%.
BibTeX:
@inproceedings{islam-predicting:2017,
  author = {Islam, T. and Manivannan, D.},
  title = {Predicting Application Failure in Cloud: A Machine Learning Approach},
  booktitle = {2017 IEEE International Conference on Cognitive Computing (ICCC)},
  year = {2017},
  pages = {24--31},
  doi = {10.1109/IEEE.ICCC.2017.11}
}
Li X, Qi Y, Chen P and Zhang X (2017), "Optimizing Backup Resources in the Cloud", In 2016 IEEE 9th International Conference on Cloud Computing (CLOUD)., June, 2017. , pp. 790-797.
Abstract: Cloud computing promises high performance and cost-efficiency, however, most cloud infrastructures operate at low utilization which greatly adhere cost effectiveness. Previous works focus on seeking efficient virtual machine (VM) consolidation strategies to increase the utilization of virtual resources in production environment, while overlooking the under-utilization of backup virtual resources. We propose a heuristic time sharing policy derived from the restless multi-armed bandit problem. The proposed policy achieves increasing backup virtual resources utilization while providing high availability. The experiment results show that the traditional 1:1 backup provision can be extended to 1:M (M >> 1) between the backup VM and the service VMs, and the utilization of backup VMs can be enhanced significantly.
BibTeX:
@inproceedings{Li2017,
  author = {Li, X. and Qi, Y. and Chen, P. and Zhang, X.},
  title = {Optimizing Backup Resources in the Cloud},
  booktitle = {2016 IEEE 9th International Conference on Cloud Computing (CLOUD)},
  year = {2017},
  pages = {790--797},
  doi = {10.1109/CLOUD.2016.0109}
}
Machado M, Rosendo D, Gomes D, Moreira A, Bezerra M, Sadok D, Endo PT and Curescu C (2017), "Prototyping a high availability PaaS: Performance analysis and lessons learned", In 2017 IFIP/IEEE Symposium on Integrated Network and Service Management (IM)., May, 2017. , pp. 805-808.
Abstract: With cloud computing consolidation, Platform-as-a-Service (PaaS) has been used as a solution for developing applications with low cost and maximum flexibility. However, an open challenge related to PaaS is the proper handling of multi-tier and stateful applications with support for high availability (HA); and scalability can be considered an essential feature for HA. However, dealing with several instances of the same application that access its state in a common area is not a simple task. This paper presents a novel PaaS framework, named NoPaaS, that supports the deployment of multi-tier and stateful applications assuring their availability according to the Service Availability Forum (SAF) redundancy model. The primary goal of this work is to present NoPaaS framework and prototype, and highlight challenges and open issues when providing multi-tier and stateful applications in high availability clouds.
BibTeX:
@inproceedings{machado-prototyping:2017,
  author = {Machado, M. and Rosendo, D. and Gomes, D. and Moreira, A. and Bezerra, M. and Sadok, D. and Endo, P. T. and Curescu, C.},
  title = {Prototyping a high availability PaaS: Performance analysis and lessons learned},
  booktitle = {2017 IFIP/IEEE Symposium on Integrated Network and Service Management (IM)},
  year = {2017},
  pages = {805--808},
  doi = {10.23919/INM.2017.7987367}
}
Tighe M and Bauer M (2017), "Topology and Application Aware Dynamic VM Management in the Cloud", Journal of Grid Computing., May, 2017. , pp. 1-22.
Abstract: Cloud computing continues to mature and more applications continue to be deployed in public clouds. Client applications deployed in the cloud should automatically scale up and down to match changing workload demands, though they must be careful to ensure that sufficient resources are provisioned to achieve performance objectives. The cloud provider, on the other hand, attempts to reduce costs by reducing power consumption by consolidating load onto fewer, highly utilized machines. In this work, we introduce an algorithm that integrates both application autoscaling and dynamic virtual machine (VM) allocation into a single algorithm in order to achieve the goals of both cloud provider and client. Further, we consider multi-VM applications, such as multi-tiered web-based applications, and extend the integrated algorithm to take the network topology into account when placing or migrating applications. The goal is to reduce VM-to-VM communication latency; our focus is on trying to contain applications within the same racks. We evaluate our work through simulation, showing that the integrated algorithm can achieve better application performance with a significant reduction in virtual machine live migrations, and the topology-aware extension successfully places applications within a single rack.
BibTeX:
@article{tighe-topology:2017,
  author = {Tighe, Michael and Bauer, Michael},
  title = {Topology and Application Aware Dynamic VM Management in the Cloud},
  journal = {Journal of Grid Computing},
  year = {2017},
  pages = {1--22},
  url = {https://link.springer.com/article/10.1007/s10723-017-9397-z},
  doi = {10.1007/s10723-017-9397-z}
}
Suh D, Jang S, Han S, Pack S, Kim MS, Kim T and Lim CG (2017), "Toward Highly Available and Scalable Software Defined Networks for Service Providers", IEEE Communications Magazine., April, 2017. Vol. 55(4), pp. 100-107.
Abstract: Software-defined networking is moving from its initial deployment in small-scale data center networks to large-scale carrier-grade networks. In such environments, high availability and scalability are two of the most prominent issues, and thus extensive work is ongoing. In this article, we first review the state of the art on high availability and scalability issues in SDN and investigate relevant open source activities. In particular, two well-known open source projects, OpenDaylight (ODL) and Open Network Operating System (ONOS), are analyzed in terms of high availability (i.e., network state database replication/synchronization and controller failover mechanisms) and scalability (i.e., network state database partition/ distribution and controller assignment mechanisms) issues. We also present experimental results on the flow rule installation/read throughput and the failover time upon a controller failure in ONOS and ODL, and identify open research challenges.
BibTeX:
@article{suh-toward:2017,
  author = {Suh, D. and Jang, S. and Han, S. and Pack, S. and Kim, M. S. and Kim, T. and Lim, C. G.},
  title = {Toward Highly Available and Scalable Software Defined Networks for Service Providers},
  journal = {IEEE Communications Magazine},
  year = {2017},
  volume = {55},
  number = {4},
  pages = {100--107},
  doi = {10.1109/MCOM.2017.1600170}
}
Matos R, Dantas J, Araujo J, Trivedi KS and Maciel P (2017), "Redundant Eucalyptus Private Clouds: Availability Modeling and Sensitivity Analysis", Journal of Grid Computing., March, 2017. Vol. 15(1), pp. 1-22.
Abstract: Cloud computing infrastructures are designed to be accessible anywhere and anytime. This requires various fault tolerance mechanisms for coping with software and hardware failures. Hierarchical modeling approaches are often used to evaluate the availability of such systems, leveraging the representation of complex failure and repair events in distinct parts of the system. This paper presents an availability evaluation for redundant private clouds, represented by RBDs and Markov chains, hierarchically assembled. These private clouds follow the basic architecture of Eucalyptus-based environments, but employing warm-standby redundant hosts for some of its main components. Closed-form equations for the steady-state availability are presented, allowing direct analytical solution for large systems. The availability equations are symbolically differentiated, allowing parametric sensitivity analysis. The results from sensitivity analysis enables system planning for improving the steady-state availability. The sensitivity indices show that failure of the Eucalyptus Cloud Manager subsystem and the respective repair activities deserve priority for maximizing the system availability.
BibTeX:
@article{matos-redundant:2017,
  author = {Matos, Rubens and Dantas, Jamilson and Araujo, Jean and Trivedi, Kishor S. and Maciel, Paulo},
  title = {Redundant Eucalyptus Private Clouds: Availability Modeling and Sensitivity Analysis},
  journal = {Journal of Grid Computing},
  year = {2017},
  volume = {15},
  number = {1},
  pages = {1--22},
  url = {https://link.springer.com/article/10.1007/s10723-016-9381-z},
  doi = {10.1007/s10723-016-9381-z}
}
Zahedi Fard SY, Ahmadi MR and Adabi S (2017), "A dynamic VM consolidation technique for QoS and energy consumption in cloud environment", The Journal of Supercomputing., March, 2017. , pp. 1-22.
Abstract: Cloud-based data centers consume a significant amount of energy which is a costly procedure. Virtualization technology, which can be regarded as the first step in the cloud by offering benefits like the virtual machine and live migration, is trying to overcome this problem. Virtual machines host workload, and because of the variability of workload, virtual machines consolidation is an effective technique to minimize the total number of active servers and unnecessary migrations and consequently improves energy consumption. Effective virtual machine placement and migration techniques act as a key issue to optimize the consolidation process. In this paper, we present a novel virtual machine consolidation technique to achieve energy–QoS–temperature balance in the cloud data center. We simulated our proposed technique in CloudSim simulation. Results of evaluation certify that physical machine temperature, SLA, and migration technique together control the energy consumption and QoS in a cloud data center.
BibTeX:
@article{zahedifard-dynamic:2017,
  author = {Zahedi Fard, Seyed Yahya and Ahmadi, Mohamad Reza and Adabi, Sahar},
  title = {A dynamic VM consolidation technique for QoS and energy consumption in cloud environment},
  journal = {The Journal of Supercomputing},
  year = {2017},
  pages = {1--22},
  url = {https://link.springer.com/article/10.1007/s11227-017-2016-8},
  doi = {10.1007/s11227-017-2016-8}
}
Zhao L, Lu L, Jin Z and Yu C (2017), "Online Virtual Machine Placement for Increasing Cloud Provider’s Revenue", IEEE Transactions on Services Computing., March, 2017. Vol. 10(2), pp. 273-285.
Abstract: Cost savings have become a significant challenge in the management of data centers. In this paper, we show that, besides energy consumption, service level agreement(SLA) violations also severely degrade the cost-efficiency of data centers. We present online VM placement algorithms for increasing cloud provider's revenue. First, First-Fit and Harmonic algorithm are devised for VM placement without considering migrations. Both algorithms get the same performance in the worst-case analysis, and equal to the lower bound of the competitive ratio. However, Harmonic algorithm could create more revenue than First-Fit by more than 10 percent when job arriving rate is greater than 1.0. Second, we formulate an optimization problem of maximizing revenue from VM migration, and prove it as NP-Hard by a reduction from 3-Partition problem. Therefore, we propose two heuristics: Least-Reliable-First (LRF) and Decreased-Density-Greedy (DDG). Experiments demonstrate that DDG yields more revenue than LRF when migration cost is low, yet leads to losses when SLA penalty is low or job arriving rate is high, due to the large number of migrations. Finally, we compare the four algorithms above with algorithms adopted in Openstack using a real trace, and find that the results are consistent with the ones using synthetic data.
BibTeX:
@article{zhao-online:2017,
  author = {Zhao, L. and Lu, L. and Jin, Z. and Yu, C.},
  title = {Online Virtual Machine Placement for Increasing Cloud Provider’s Revenue},
  journal = {IEEE Transactions on Services Computing},
  year = {2017},
  volume = {10},
  number = {2},
  pages = {273--285},
  url = {http://ieeexplore.ieee.org/document/7128735/},
  doi = {10.1109/TSC.2015.2447550}
}
Blind K, Petersen SS and Riillo CAF (2017), "The impact of standards and regulation on innovation in uncertain markets", Research Policy., February, 2017. Vol. 46(1), pp. 249-264.
Abstract: This study analyses the impact of formal standards and regulation on firms’ innovation efficiency, considering different levels of market uncertainty. We argue that formal standards and regulation have different effects, depending on the extent of market uncertainty derived from theoretical considerations about information asymmetry and regulatory capture. Our empirical analysis is based on the German Community Innovation Survey (CIS). The results show that formal standards lead to lower innovation efficiency in markets with low uncertainty, while regulations have the opposite effect. In cases of high market uncertainty, we observe that regulation leads to lower innovation efficiency, while formal standards have the reverse effect. Our results have important implications for the future application of both instruments, showing that their benefits heavily depend on the market environment.
BibTeX:
@article{blind-impact:2017,
  author = {Blind, Knut and Petersen, Sören S. and Riillo, Cesare A. F.},
  title = {The impact of standards and regulation on innovation in uncertain markets},
  journal = {Research Policy},
  year = {2017},
  volume = {46},
  number = {1},
  pages = {249--264},
  url = {http://www.sciencedirect.com/science/article/pii/S0048733316301743},
  doi = {10.1016/j.respol.2016.11.003}
}
Casellas R, Vilalta R, Martínez R and Muñoz R (2017), "Highly available SDN control of flexi-grid networks with network function virtualization-enabled replication", IEEE/OSA Journal of Optical Communications and Networking., February, 2017. Vol. 9(2), pp. A207-A215.
Abstract: New trends and emerging requirements have driven the development of extensions to the path computation element (PCE) architecture beyond the computation of a set of constrained routes and associated resources between endpoints, given a network topology. Such extensions involve the use of a PCE for the control of network services, in which deploying a PCE as a centralized network controller facilitates the adoption of software-defined networking (SDN) principles while allowing a progressive migration of already existing deployments. A key requirement for the adoption of centralized control solutions is the ability to deploy a resilient, secure, dynamically configurable, adaptive, and highly available (virtualized) infrastructure supporting end-to-end services, including critical and vertical ones. Part of this infrastructure is the control plane functional elements (e.g., controllers), and the use of network function virtualization (NFV) is a enabler for the high availability of such elements while additionally reducing OPEX and CAPEX. NFV provides a feature-complete framework for the replication of software components that is a straightforward and commonly adopted approach to address the aforementioned requirement, but it implies the need for timely synchronization of databases between replicas. In this paper we present, implement, and validate an architecture for PCE and SDN control high availability, combining the virtualization of the control function by means of dynamic replication and the timely synchronization of their internal state using the PCEP and BGP-LS protocols. We experimentally validate the approach with a testbed, including a GMPLS/PCE control plane, and a replica management system implemented following the ETSI NFV framework, using the OpenStack cloud management software.
BibTeX:
@article{casellas-highly:2017,
  author = {Casellas, R. and Vilalta, R. and Martínez, R. and Muñoz, R.},
  title = {Highly available SDN control of flexi-grid networks with network function virtualization-enabled replication},
  journal = {IEEE/OSA Journal of Optical Communications and Networking},
  year = {2017},
  volume = {9},
  number = {2},
  pages = {A207--A215},
  doi = {10.1364/JOCN.9.00A207}
}
Prathiba S and Sowvarnica S (2017), "Survey of failures and fault tolerance in cloud", In 2017 2nd International Conference on Computing and Communications Technologies (ICCCT)., February, 2017. , pp. 169-172.
Abstract: Cloud computing provides support for hosting client's application. Cloud is a distributed platform that provides hardware, software and network resources to both execute consumer's application and also to store and mange user's data. Cloud is also used to execute scientific workflow applications that are in general complex in nature when compared to other applications. Since cloud is a distributed platform, it is more prone to errors and failures. In such an environment, avoiding a failure is difficult and identifying the source of failure is also complex. Because of this, fault tolerance mechanisms are implemented on the cloud platform. This ensures that even if there are failures in the environment, critical data of the client is not lost and user's application running on cloud is not affected in any manner. Fault tolerance mechanisms also help in improving the cloud's performance by proving the services to the users as required on demand. In this paper a survey of existing fault tolerance mechanisms for the cloud platform are discussed. This paper also discusses the failures, fault tolerant clustering methods and fault tolerant models that are specific for scientific workflow applications.
BibTeX:
@inproceedings{prathiba-survey:2017,
  author = {Prathiba, S. and Sowvarnica, S.},
  title = {Survey of failures and fault tolerance in cloud},
  booktitle = {2017 2nd International Conference on Computing and Communications Technologies (ICCCT)},
  year = {2017},
  pages = {169--172},
  doi = {10.1109/ICCCT2.2017.7972271}
}
Teng F, Yu L, Li T, Deng D and Magoulès F (2017), "Energy efficiency of VM consolidation in IaaS clouds", The Journal of Supercomputing., February, 2017. Vol. 73(2), pp. 782-809.
BibTeX:
@article{teng-energy:2017,
  author = {Teng, Fei and Yu, Lei and Li, Tianrui and Deng, Danting and Magoulès, Frédéric},
  title = {Energy efficiency of VM consolidation in IaaS clouds},
  journal = {The Journal of Supercomputing},
  year = {2017},
  volume = {73},
  number = {2},
  pages = {782--809},
  url = {http://link.springer.com/10.1007/s11227-016-1797-5},
  doi = {10.1007/s11227-016-1797-5}
}
Abdelsamea A, El-Moursy AA, Hemayed EE and Eldeeb H (2017), "Virtual machine consolidation enhancement using hybrid regression algorithms", Egyptian Informatics Journal., January, 2017.
Abstract: Cloud computing data centers are growing rapidly in both number and capacity to meet the increasing demands for highly-responsive computing and massive storage. Such data centers consume enormous amounts of electrical energy resulting in high operating costs and carbon dioxide emissions. The reason for this extremely high energy consumption is not just the quantity of computing resources and the power inefficiency of hardware, but rather lies in the inefficient usage of these resources. VM consolidation involves live migration of VMs hence the capability of transferring a VM between physical servers with a close to zero down time. It is an effective way to improve the utilization of resources and increase energy efficiency in cloud data centers. VM consolidation consists of host overload/underload detection, VM selection and VM placement. Most of the current VM consolidation approaches apply either heuristic-based techniques, such as static utilization thresholds, decision-making based on statistical analysis of historical data; or simply periodic adaptation of the VM allocation. Most of those algorithms rely on CPU utilization only for host overload detection. In this paper we propose using hybrid factors to enhance VM consolidation. Specifically we developed a multiple regression algorithm that uses CPU utilization, memory utilization and bandwidth utilization for host overload detection. The proposed algorithm, Multiple Regression Host Overload Detection (MRHOD), significantly reduces energy consumption while ensuring a high level of adherence to Service Level Agreements (SLA) since it gives a real indication of host utilization based on three parameters (CPU, Memory, Bandwidth) utilizations instead of one parameter only (CPU utilization). Through simulations we show that our approach reduces power consumption by 6 times compared to single factor algorithms using random workload. Also using PlanetLab workload traces we show that MRHOD improves the ESV metric by about 24% better than other single factor regression algorithms (LR and LRR). Also we developed Hybrid Local Regression Host Overload Detection algorithm (HLRHOD) that is based on local regression using hybrid factors. It outperforms the single factor algorithms.
BibTeX:
@article{abdelsamea-virtual:2017,
  author = {Abdelsamea, Amany and El-Moursy, Ali A. and Hemayed, Elsayed E. and Eldeeb, Hesham},
  title = {Virtual machine consolidation enhancement using hybrid regression algorithms},
  journal = {Egyptian Informatics Journal},
  year = {2017},
  url = {http://www.sciencedirect.com/science/article/pii/S1110866516300925},
  doi = {10.1016/j.eij.2016.12.002}
}
Kanso A, Deixionne N, Gherbi A and Moghaddam FF (2017), "Enhancing OpenStack Fault Tolerance for Provisioning Computing Environments", In 2017 IEEE 18th International Symposium on High Assurance Systems Engineering (HASE)., January, 2017. , pp. 77-83.
Abstract: With the rise of cloud computing and virtualization of resources, cloud management systems are becoming a key differentiator for the quality of service offered by the cloud providers. OpenStack is considered the de-facto open-source cloud management system at the infrastructure as a service layer. Despite the efforts of hardening the high availability of OpenStack, its fault tolerance during the provisioning of resources is yet to be proven. In this paper we present a testing framework for the fault tolerance of OpenStack, namely TestStack. We expose the limitations of OpenStack by injecting runtime failures into a highly available OpenStack environment. Our testing results reveal inconsistencies in the behavior of OpenStack in the presence of failures that we address by proposing our solution, namely FTStack, to harden its fault tolerance.
BibTeX:
@inproceedings{kanso-enhancing:2017,
  author = {Kanso, A. and Deixionne, N. and Gherbi, A. and Moghaddam, F. F.},
  title = {Enhancing OpenStack Fault Tolerance for Provisioning Computing Environments},
  booktitle = {2017 IEEE 18th International Symposium on High Assurance Systems Engineering (HASE)},
  year = {2017},
  pages = {77--83},
  doi = {10.1109/HASE.2017.27}
}
Toy M (2017), "High Availability Layers and Failure Recovery Timers for Virtualized Systems and Services", Procedia Computer Science., January, 2017. Vol. 114, pp. 126-131.
Abstract: Highly available virtualized systems and services are required for applications that are sensitive to down time. The availability of virtualized systems and services needs to be on par with that for non-virtualized systems and services. However, high availability (HA) designs for virtualized systems and services are much more complicated than those for their non-virtualized counterparts due to the existence of independent multiple layers where each layer may have its own failure recovery mechanism. This paper defines a novel approach to the design of highly available virtualized systems and services. The recovery from failures is self-coordinated. There are no race conditions among layers.
BibTeX:
@article{toy-high:2017,
  author = {Toy, Mehmet},
  title = {High Availability Layers and Failure Recovery Timers for Virtualized Systems and Services},
  journal = {Procedia Computer Science},
  year = {2017},
  volume = {114},
  pages = {126--131},
  url = {http://www.sciencedirect.com/science/article/pii/S1877050917318227},
  doi = {10.1016/j.procs.2017.09.028}
}
Chen Y and Jiang Z-a (2017), "Dynamically Predicting the Quality of Service: Batch, Online, and Hybrid Algorithms", Journal of Electrical and Computer Engineering. Vol. 2017
Abstract: This paper studies the problem of dynamically modeling the quality of web service. The philosophy of designing practical web service recommender systems is delivered in this paper. A general system architecture for such systems continuously collects the user-service invocation records and includes both an online training module and an offline training module for quality prediction. In addition, we introduce matrix factorization-based online and offline training algorithms based on the gradient descent algorithms and demonstrate the fitness of this online/offline algorithm framework to the proposed architecture. The superiority of the proposed model is confirmed by empirical studies on a real-life quality of web service data set and comparisons with existing web service recommendation algorithms.
BibTeX:
@article{chen-dynamically:2017,
  author = {Chen, Ya and Jiang, Zhong-an},
  title = {Dynamically Predicting the Quality of Service: Batch, Online, and Hybrid Algorithms},
  journal = {Journal of Electrical and Computer Engineering},
  year = {2017},
  volume = {2017},
  note = {DOI: 10.1155/2017/9547869},
  url = {https://www.hindawi.com/journals/jece/2017/9547869/},
  doi = {10.1155/2017/9547869}
}
Dhanoa I (2017), "An Experiment Based Examination of Energy Overhead Through VM Migration", International Journal of Advanced Research in Computer Science. Vol. 8(9), pp. 655-660.
Abstract: The use of cloud computing has increased in recent days with tremendous benefits to users and also provoked to rise in total ownership cost due to swift increase in energy consumption of data centers. Out of various approaches, best of virtual machine placement policies can be exploited to reduce the energy consumption during migration in data centers. The virtual machine live migration during placement has enough potential to reduce energy consumption with certain level of utilization. One of the mechanisms to achieve energy efficiency is to manage the migration time parameter of live migration with appropriate network bandwidth in communication aware connected data centers. In spite of this, virtual machine size and network bandwidth have a great impact on energy consumption of sub systems during virtual machine live migration. To follow this study, in this paper, analysis has been made to see the impact of migration time on energy consumption of sub systems during guest virtual machine live migration. For experimentation, Kernel-based Virtual Machine (KVM) hypervisor and Virt Manager was used to perform live migration on Ubuntu 14.04 Linux machines in various conditions. Afterwards, the noted observations are validated with Pearson’s Correlation Coefficient statistical approach to study the strength of relationship of defined parameters.
BibTeX:
@article{dhanoa-experiment:2017,
  author = {Dhanoa, Inderjit},
  title = {An Experiment Based Examination of Energy Overhead Through VM Migration},
  journal = {International Journal of Advanced Research in Computer Science},
  year = {2017},
  volume = {8},
  number = {9},
  pages = {655--660},
  url = {http://ijarcs.in/index.php/Ijarcs/article/view/5070},
  doi = {10.26483/ijarcs.v8i9.5070}
}
Kominos C, Seyvet N and Vandikas K (2017), "Bare-metal, virtual machines and containers in OpenStack", In 20th Conference on Innovations in Clouds, Internet and Networks (ICIN), 2017. , pp. 36-43.
Abstract: Cloud computing is an on-demand access model for computing resources most notably embodied by the OpenStack project. As of release Liberty, OpenStack supports provisioning Bare-metal, Virtual machine (VM) and container based hosts. These different hosts incur different overheads. Consequently, the main goal of this paper is to empirically quantify that overhead through a series of experiments. The following drivers are leveraged in this process: Ironic for Bare-metal or Metal as a Service (MaaS), nova-compute for VM-based hosts, and nova-docker for Docker based containers. We make use of a private-cloud in order to compare the different options. This cloud is then used to compare the different hosts in terms of performance (CPU, networking, disk I/O and RAM) by using various open-source benchmarking tools. We also measure boot-up times. The output of these benchmarks is collected and results are compared. In this paper we discuss our learnings as well as the different configurations and fine-tuning that we implemented. As a result, we provide a set of recommendations based on the advantages and disadvantages of each host in present and future cloud deployments. © 2017 IEEE.
BibTeX:
@inproceedings{kominos-bare:2017,
  author = {Kominos, C.G. and Seyvet, N. and Vandikas, K.},
  title = {Bare-metal, virtual machines and containers in OpenStack},
  booktitle = {20th Conference on Innovations in Clouds, Internet and Networks (ICIN), 2017},
  year = {2017},
  pages = {36--43},
  doi = {10.1109/ICIN.2017.7899247}
}
Koomey J and Taylor J (2017), "Zombie/Comatose Servers Redux"
BibTeX:
@article{koomey-zombie/comatose:2017,
  author = {Koomey, Jonathan and Taylor, Jon},
  title = {Zombie/Comatose Servers Redux},
  year = {2017},
  url = {http://anthesisgroup.com/wp-content/uploads/2017/03/Comatsoe-Servers-Redux:2017.pdf}
}
Li X, Qi Y, Chen P and Zhang X (2017), "Optimizing Backup Resources in the Cloud" , pp. 790-797.
Abstract: Cloud computing promises high performance and cost-efficiency; however, most cloud infrastructures operate at low utilization which greatly adhere cost effectiveness. Previous works focus on seeking efficient virtual machine (VM) consolidation strategies to increase the utilization of virtual resources in production environment, while overlooking the under-utilization of backup virtual resources. We propose a heuristic time sharing policy derived from the restless multi-armed bandit problem. The proposed policy achieves increasing backup virtual resources utilization while providing high availability. The experiment results show that the traditional 1:1 backup provision can be extended to 1:M (M≥1) between the backup VM and the service VMs, and the utilization of backup VMs can be enhanced significantly. © 2016 IEEE.
BibTeX:
@inproceedings{li-optimizing:2017,
  author = {Li, X. and Qi, Y. and Chen, P. and Zhang, X.},
  title = {Optimizing Backup Resources in the Cloud},
  year = {2017},
  pages = {790--797},
  doi = {10.1109/CLOUD.2016.107}
}
Mazumdar S and Pranzo M (2017), "Power efficient server consolidation for Cloud data center", Future Generation Computer Systems. Vol. 70, pp. 4-16.
Abstract: Cloud computing has become an essential part of the global digital economy due to its extensibility, flexibility and reduced costs of operations. Nowadays, data centers (DCs) contain thousands of different machines running a huge number of diverse applications over an extended period. Resource management in Cloud is an open issue since an efficient resource allocation can reduce the infrastructure running cost. In this paper, we propose a snapshot-based solution for server consolidation problem from Cloud infrastructure provider (CIP) perspective. Our proposed mathematical formulation aims at reducing power cost by employing efficient server consolidation, and also considering the issues such as (i) mapping incoming and failing virtual machines (VMs), (ii) reducing a total number of VM migrations and (iii) consolidating running server workloads. We also compare the performance of our proposed model to the well-known Best Fit heuristics and its extension to include server consolidation via VM migration denoted as Best Fit with Consolidation (BFC). Our proposed mathematical formulation allows us to measure the solution quality in absolute terms, and it can also be applicable in practice. In our simulations, we show that relevant improvements (from 6% to 15%) over the widely adopted Best Fit algorithm can be achieved in a reasonable computing time.
BibTeX:
@article{mazumdar-power:2017,
  author = {Mazumdar, Somnath and Pranzo, Marco},
  title = {Power efficient server consolidation for Cloud data center},
  journal = {Future Generation Computer Systems},
  year = {2017},
  volume = {70},
  pages = {4--16},
  url = {http://www.sciencedirect.com/science/article/pii/S0167739X16308093},
  doi = {10.1016/j.future.2016.12.022}
}
Moreno-Vozmediano R, Montero RS, Huedo E and Llorente IM (2017), "Orchestrating the Deployment of High Availability Services on Multi-zone and Multi-cloud Scenarios", Journal of Grid Computing. , pp. 1-15.
Abstract: Cloud computing has become one of the most used platforms to deploy High Availability (HA) solutions for its flexibility, on-demand provisioning, and elasticity. However, although many providers offer specific tools for HA support, like floating IPs and load balancing, the analysis of downtime at public cloud providers in previous years shows that a combination of several availability zones or cloud providers is required to achieve “five nines” availability. Besides reducing the chances of failure, the use of multiple availability zones and geographically distributed clouds may additionally bring performance and cost benefits. However, the orchestration, in an efficient and adaptive way, of HA multi-tier services in multi-zone and multi-cloud environments brings several challenges. This paper presents a novel orchestration method to automate the deployment and management of high availability multi-tier services on multiple availability zones, by introducing new affinity mechanisms, such as VM to location and role to role affinity/anti-affinity rules. Furthermore, we also extend this solution to multi-cloud scenarios, based on the replication or distribution of the service components among various clouds, along with their corresponding affinity rules.
BibTeX:
@article{moreno-vozmediano-orchestrating:2017,
  author = {Moreno-Vozmediano, R. and Montero, R. S. and Huedo, E. and Llorente, I. M.},
  title = {Orchestrating the Deployment of High Availability Services on Multi-zone and Multi-cloud Scenarios},
  journal = {Journal of Grid Computing},
  year = {2017},
  pages = {1--15},
  note = {DOI: 10.1007/s10723-017-9417-z},
  url = {https://link.springer.com/article/10.1007/s10723-017-9417-z},
  doi = {10.1007/s10723-017-9417-z}
}
Nguyen TL and Lebre A (2017), "Virtual Machine Boot Time Model", In 2017 25th Euromicro International Conference on Parallel, Distributed and Network-Based Processing. , pp. 430-437. IEEE.
BibTeX:
@inproceedings{nguyen-virtual:2017,
  author = {Nguyen, Thuy Linh and Lebre, Adrien},
  title = {Virtual Machine Boot Time Model},
  booktitle = {2017 25th Euromicro International Conference on Parallel, Distributed and Network-Based Processing},
  publisher = {IEEE},
  year = {2017},
  pages = {430--437},
  url = {http://ieeexplore.ieee.org/document/7912684/},
  doi = {10.1109/PDP.2017.58}
}
Masakari (2017), "Masakari - OpenStack".
BibTeX:
@misc{openstack-masakari:2017,
  author = {Masakari},
  title = {Masakari - OpenStack},
  year = {2017},
  url = {https://wiki.openstack.org/wiki/Masakari}
}
Riekstin AC, Rodrigues BB, Nguyen KK, Carvalho TCMdB, Meirosu C, Stiller B and Cheriet M (2017), "A Survey on Metrics and Measurement Tools for Sustainable Distributed Cloud Networks", IEEE Communications Surveys Tutorials. Vol. PP(99), pp. 1-1.
Abstract: Energy efficiency and emissions awareness are core capabilities for sustainable and lower cost distributed cloud networks. In this context, metrics are fundamental for comparison and management purposes, along with the methods and tools which support such metrics’ capture and analysis. However, prior works on green metrics and tools have presented only a partial view, mainly as a result of the recent advances in green networking technologies. In this survey, we present an extensive study of metrics, methods, and tools to support sustainable operations in distributed cloud networks, with the aim of providing an end-to-end and up-to-date scenario to support current and coming research, as well as to analyze existing gaps.
BibTeX:
@article{riekstin-survey:2017,
  author = {Riekstin, A. C. and Rodrigues, B. B. and Nguyen, K. K. and Carvalho, T. C. M. d B. and Meirosu, C. and Stiller, B. and Cheriet, M.},
  title = {A Survey on Metrics and Measurement Tools for Sustainable Distributed Cloud Networks},
  journal = {IEEE Communications Surveys Tutorials},
  year = {2017},
  volume = {PP},
  number = {99},
  pages = {1--1},
  doi = {10.1109/COMST.2017.2784803}
}
Sahoo CN and Goswami V (2017), "Cost and energy optimisation of cloud data centres through dual VM modes - activation and passivation", International Journal of Communication Networks and Distributed Systems. Vol. 18(3/4), pp. 371.
BibTeX:
@article{sahoo-cost:2017,
  author = {Sahoo, Choudhury N. and Goswami, Veena},
  title = {Cost and energy optimisation of cloud data centres through dual VM modes - activation and passivation},
  journal = {International Journal of Communication Networks and Distributed Systems},
  year = {2017},
  volume = {18},
  number = {3/4},
  pages = {371},
  url = {http://www.inderscience.com/link.php?id=10004670},
  doi = {10.1504/IJCNDS.2017.10004670}
}
Sharma O and Saini H (2017), "SLA and Performance Efficient Heuristics for Virtual Machines Placement in Cloud Data Centers", International Journal of Grid and High Performance Computing (IJGHPC). Vol. 9(3), pp. 17-33.
Abstract: Cloud computing has revolutionized the working models of the IT industry and is increasing the demand for cloud resources, which further leads to an increase in energy ...
BibTeX:
@article{sharma-sla:2017,
  author = {Sharma, Oshin and Saini, Hemraj},
  title = {SLA and Performance Efficient Heuristics for Virtual Machines Placement in Cloud Data Centers},
  journal = {International Journal of Grid and High Performance Computing (IJGHPC)},
  year = {2017},
  volume = {9},
  number = {3},
  pages = {17--33},
  url = {https://www.igi-global.com/article/sla-and-performance-efficient-heuristics-for-virtual-machines-placement-in-cloud-data-centers/185771},
  doi = {10.4018/IJGHPC.2017070102}
}
Vaezi M and Zhang Y (2017), "Virtualization and Cloud Computing", In Cloud Mobile Networks. , pp. 11-31. Springer, Cham.
Abstract: In an effort to move the networking industry from today’s manual configuration to embrace automated solutions that are coordinated with the rest of the infrastructure, there have been several emerging technologies in the past few years, chief among them are network virtualization (NV), network functions virtualization (NFV), and software-defined networking (SDN).
BibTeX:
@incollection{vaezi-virtualization:2017,
  author = {Vaezi, Mojtaba and Zhang, Ying},
  title = {Virtualization and Cloud Computing},
  booktitle = {Cloud Mobile Networks},
  publisher = {Springer, Cham},
  year = {2017},
  pages = {11--31},
  note = {DOI: 10.1007/978-3-319-54496-0_2},
  url = {https://link.springer.com/chapter/10.1007/978-3-319-54496-0_2}
}
Madni SHH, Latiff MSA, Coulibaly Y and Abdulhamid SM (2016), "Recent advancements in resource allocation techniques for cloud computing environment: a systematic review", Cluster Computing., December, 2016.
BibTeX:
@article{madni-recent:2016,
  author = {Madni, Syed Hamid Hussain and Latiff, Muhammad Shafie Abd and Coulibaly, Yahaya and Abdulhamid, Shafi’i Muhammad},
  title = {Recent advancements in resource allocation techniques for cloud computing environment: a systematic review},
  journal = {Cluster Computing},
  year = {2016},
  url = {http://link.springer.com/10.1007/s10586-016-0684-4},
  doi = {10.1007/s10586-016-0684-4}
}
Ali A, Lu L, Zhu Y and Yu J (2016), "An Energy Efficient Algorithm for Virtual Machine Allocation in Cloud Datacenters", In Advanced Computer Architecture., August, 2016. , pp. 61-72. Springer, Singapore.
Abstract: In cloud datacenters, virtual machine (VM) allocation in a power efficient way remains a critical research problem. There are a number of algorithms for allocating the workload among different machines. However, existing works do not consider more than one energy efficient host, thus they are not efficient for large scale cloud datacenters. In this paper, we propose a VM allocation algorithm to achieve higher energy efficiency in large scale cloud datacenters. Simulation results show that, compared with the BRS, RR and MPD algorithms, our algorithm can achieve 23%, 23% and 9% more power efficiency in a large scale cloud environment.
BibTeX:
@inproceedings{ali-energy:2016,
  author = {Ali, Ahmad and Lu, Li and Zhu, Yanmin and Yu, Jiadi},
  title = {An Energy Efficient Algorithm for Virtual Machine Allocation in Cloud Datacenters},
  booktitle = {Advanced Computer Architecture},
  publisher = {Springer, Singapore},
  year = {2016},
  pages = {61--72},
  url = {https://link.springer.com/chapter/10.1007/978-981-10-2209-8_6},
  doi = {10.1007/978-981-10-2209-8_6}
}
Milani AS and Navimipour NJ (2016), "Load balancing mechanisms and techniques in the cloud environments: Systematic literature review and future trends", Journal of Network and Computer Applications., August, 2016. Vol. 71, pp. 86-98.
BibTeX:
@article{milani-load:2016,
  author = {Milani, Alireza Sadeghi and Navimipour, Nima Jafari},
  title = {Load balancing mechanisms and techniques in the cloud environments: Systematic literature review and future trends},
  journal = {Journal of Network and Computer Applications},
  year = {2016},
  volume = {71},
  pages = {86--98},
  url = {http://linkinghub.elsevier.com/retrieve/pii/S1084804516301217},
  doi = {10.1016/j.jnca.2016.06.003}
}
Mondal S, Muppala J and Machida F (2016), "Virtual Machine Replication on Achieving Energy-Efficiency in a Cloud", Electronics., July, 2016. Vol. 5(3), pp. 37.
BibTeX:
@article{mondal-virtual:2016,
  author = {Mondal, Subrota and Muppala, Jogesh and Machida, Fumio},
  title = {Virtual Machine Replication on Achieving Energy-Efficiency in a Cloud},
  journal = {Electronics},
  year = {2016},
  volume = {5},
  number = {3},
  pages = {37},
  url = {http://www.mdpi.com/2079-9292/5/3/37},
  doi = {10.3390/electronics5030037}
}
Kansal NJ and Chana I (2016), "Energy-aware Virtual Machine Migration for Cloud Computing - A Firefly Optimization Approach", Journal of Grid Computing., June, 2016. Vol. 14(2), pp. 327-345.
Abstract: Energy efficiency has grown into a latest exploration area of virtualized cloud computing paradigm. The increase in the number and the size of the cloud data centers has propagated the need for energy efficiency. An extensively practiced technology in cloud computing is live virtual machine migration and is thus focused in this work to save energy. This paper proposes an energy-aware virtual machine migration technique for cloud computing, which is based on the Firefly algorithm. The proposed technique migrates the maximally loaded virtual machine to the least loaded active node while maintaining the performance and energy efficiency of the data centers. The efficacy of the proposed technique is exhibited by comparing it with other techniques using the CloudSim simulator. An enhancement in the average energy consumption of about 44.39 % has been attained by reducing an average of 72.34 % of migrations and saving 34.36 % of hosts, thereby, making the data center more energy-aware.
BibTeX:
@article{kansal-energy-aware:2016,
  author = {Kansal, Nidhi Jain and Chana, Inderveer},
  title = {Energy-aware Virtual Machine Migration for Cloud Computing - A Firefly Optimization Approach},
  journal = {Journal of Grid Computing},
  year = {2016},
  volume = {14},
  number = {2},
  pages = {327--345},
  url = {https://link.springer.com/article/10.1007/s10723-016-9364-0},
  doi = {10.1007/s10723-016-9364-0}
}
Sahoo C and Goswami V (2016), "Dynamic control and resource management for mission critical multi-tier applications in cloud data center", International Journal of Electrical and Computer Engineering (IJECE)., June, 2016. Vol. 6(3), pp. 1023-1030.
Abstract: The multi-tier architecture style has become an industry standard in modern data centers, with each tier providing certain functionality. To avoid congestion and to adhere to the SLA under fluctuating workload and unpredictable failures of mission critical multi-tier applications hosted in the cloud, we need a dynamic admission control policy, such that the requests must be processed from the first tier to the last without any delay. This paper presents the least strict admission control policy, which will induce the maximal throughput, for a two-tier system with parallel servers. We propose an optimization model to minimize the total number of virtual machines for computing resources in each tier by dynamically varying the mean service rate of the VMs. Some performance indicators and computational results showing the effect of model parameters are presented. This model is also applicable to priority as well as real-time based applications in a Cloud based environment.
BibTeX:
@article{sahoo-dynamic:2016,
  author = {Sahoo, C.N. and Goswami, V.},
  title = {Dynamic control and resource management for mission critical multi-tier applications in cloud data center},
  journal = {International Journal of Electrical and Computer Engineering (IJECE)},
  year = {2016},
  volume = {6},
  number = {3},
  pages = {1023--1030},
  url = {http://www.iaescore.com/journals/index.php/IJECE/article/view/456},
  doi = {10.11591/ijece.v6i3.10087}
}
Balalaie A, Heydarnoori A and Jamshidi P (2016), "Microservices Architecture Enables DevOps: Migration to a Cloud-Native Architecture", IEEE Software., May, 2016. Vol. 33(3), pp. 42-52.
Abstract: This article reports on experiences and lessons learned during incremental migration and architectural refactoring of a commercial mobile back end as a service to microservices architecture. It explains how the researchers adopted DevOps and how this facilitated a smooth migration.
BibTeX:
@article{balalaie-microservices:2016,
  author = {Balalaie, A. and Heydarnoori, A. and Jamshidi, P.},
  title = {Microservices Architecture Enables DevOps: Migration to a Cloud-Native Architecture},
  journal = {IEEE Software},
  year = {2016},
  volume = {33},
  number = {3},
  pages = {42--52},
  doi = {10.1109/MS.2016.64}
}
Bias R (2016), "The History of Pets vs Cattle and How to Use the Analogy Properly", Cloudscaling website, (04/2016)., April, 2016.
Abstract: I have been meaning to write this post for a long time, but one thing or another has gotten in the way. It’s important to me to provide an accurate history, ...
BibTeX:
@article{bias-history:2016,
  author = {Bias, Randy},
  title = {The History of Pets vs Cattle and How to Use the Analogy Properly},
  journal = {Cloudscaling website, (04/2016)},
  year = {2016},
  url = {http://cloudscaling.com/blog/cloud-computing/the-history-of-pets-vs-cattle/}
}
Bias R (2016), "The Rancher's Dilemma: Reconciling Pets & Cattle", Cloudscaling website, (04/2016)., April, 2016.
Abstract: When I first started promulgating the pets vs. cattle meme, it really helped me get through roadblocks of confusion. Many in IT couldn’t tell the difference ...
BibTeX:
@article{bias-ranchers:2016,
  author = {Bias, Randy},
  title = {The Rancher's Dilemma: Reconciling Pets & Cattle},
  journal = {Cloudscaling website, (04/2016)},
  year = {2016},
  url = {http://cloudscaling.com/blog/openstack/the-ranchers-dilemma/}
}
Nguyen TA, Kim DS and Park JS (2016), "Availability modeling and analysis of a data center for disaster tolerance", Future Generation Computer Systems., March, 2016. Vol. 56, pp. 27-50.
Abstract: Availability assessment of a data center with disaster tolerance (DT) is demanding for cloud computing based businesses. Previous work attempted to model and analyze the computing systems without a good consideration on disaster occurrence, unexpected failure of network connection and proper dependencies between subsystems in a data center. This paper presents a comprehensive availability model of a data center for DT using stochastic reward nets (SRN). The model incorporates (i) a typical two-level high availability (HA) configuration (i.e., active/active between sites and active/passive within a site), (ii) various fault and disaster tolerant techniques; (iii) dependencies between subsystems (e.g. between a host and virtual machines (VMs), between a network area storage (NAS) and VMs) and dependency between disastrous events and physical subsystems; and (iv) unexpected failures during data transmission between data centers. The constructed SRN model is analyzed on the basis of steady state analysis, downtime cost analysis, and sensitivity analysis with regard to major impacting parameters. The analysis results show the availability improvement of the disaster tolerant data center (DTDC) and featured system responses corresponding to the selected variables. The modeling and analysis of the DTDC in this paper provide a selection basis of designing for disasters in consideration of the trade-off between system availability and downtime cost with infrastructure construction cost.
BibTeX:
@article{nguyen-availability:2016,
  author = {Nguyen, Tuan Anh and Kim, Dong Seong and Park, Jong Sou},
  title = {Availability modeling and analysis of a data center for disaster tolerance},
  journal = {Future Generation Computer Systems},
  year = {2016},
  volume = {56},
  pages = {27--50},
  url = {http://www.sciencedirect.com/science/article/pii/S0167739X15002824},
  doi = {10.1016/j.future.2015.08.017}
}
Silva RACd and Fonseca NLSd (2016), "Topology-Aware Virtual Machine Placement in Data Centers", Journal of Grid Computing., March, 2016. Vol. 14(1), pp. 75-90.
Abstract: This paper presents the Topology-aware Virtual Machine Placement algorithm, which aims at placing groups of virtual machines in data centers. It was designed to occupy small areas of the data center network in order to consolidate the network flows produced by the virtual machines. Extensive simulation is used to show that the proposed algorithm prevents the formation of network bottlenecks, therefore accepting more requests of allocation of virtual machines. Moreover, these advantages are obtained without compromising energy efficiency. The energy consumption of servers and switches are taken into account, and these are switched off whenever idle.
BibTeX:
@article{silva-topology-aware:2016,
  author = {Silva, Rodrigo A. C. da and Fonseca, Nelson L. S. da},
  title = {Topology-Aware Virtual Machine Placement in Data Centers},
  journal = {Journal of Grid Computing},
  year = {2016},
  volume = {14},
  number = {1},
  pages = {75--90},
  url = {https://link.springer.com/article/10.1007/s10723-015-9343-x},
  doi = {10.1007/s10723-015-9343-x}
}
Toosi AN, Vanmechelen K, Khodadadi F and Buyya R (2016), "An Auction Mechanism For Cloud Spot Markets", ACM 11th Taas., February, 2016.
Abstract: Dynamic forms of resource pricing have recently been introduced by cloud providers that offer Infrastructure as a Service (IaaS) capabilities in order to maximize profits and balance resource supply and demand. The design of a mechanism that efficiently prices perishable cloud resources in line with a provider's profit maximization goal remains an open research challenge, however. In this article, we propose the online extended consensus revenue estimate mechanism in the setting of a recurrent, multiunit and single price auction for IaaS cloud resources. The mechanism is envy-free, has a high probability of being truthful, and generates a near optimal profit for the provider. We combine the proposed auction design with a scheme for dynamically calculating reserve prices based on data center power usage effectiveness (PUE) and electricity costs. Our simulation-based evaluation of the mechanism demonstrates its effectiveness under a broad variety of market conditions. In particular, we show how it improves on the classical uniform price auction, and we investigate the value of prior knowledge on the execution time of virtual machines for maximizing profit. We also developed a system prototype and conducted a small-scale experimental study with a group of 10 users that confirms the truthfulness property of the mechanism in a real test environment.
BibTeX:
@article{toosi-auction:2016,
  author = {Toosi, Adel Nadjaran and Vanmechelen, Kurt and Khodadadi, Farzad and Buyya, Rajkumar},
  title = {An Auction Mechanism For Cloud Spot Markets},
  journal = {ACM 11th Taas},
  year = {2016},
  doi = {10.1145/2843945}
}
U.S. Department of Energy (2016), "Data Center Master List of Energy Efficiency Actions". Thesis at: U.S. Department of Energy. Lawrence Berkeley National Laboratory, February, 2016. (8), pp. 48.
Abstract: Comprehensive list of recommended efficiency actions for data centers. The Master List also feeds into our Data Center Profiler (DC Pro) tool to provide tailored recommendations for improvement. Technologies: cooling air / air management, monitoring and controls, general IT equipment power, environmental conditions.
BibTeX:
@techreport{usde-data:2016,
  author = {U.S. Department of Energy},
  title = {Data Center Master List of Energy Efficiency Actions},
  school = {U.S. Department of Energy},
  year = {2016},
  number = {8},
  pages = {48},
  url = {https://datacenters.lbl.gov/resources/data-center-master-list-energy}
}
Butler B (2016), "And the cloud provider with the best uptime in 2015 is…", networkworld.com., January, 2016.
Abstract: Amazon’s cloud bests those of Microsoft and Google by this reliability test
BibTeX:
@article{butler-and:2016,
  author = {Butler, Brandon},
  title = {And the cloud provider with the best uptime in 2015 is…},
  journal = {networkworld.com},
  year = {2016},
  url = {https://www.networkworld.com/article/3020235/cloud-computing/and-the-cloud-provider-with-the-best-uptime-in-2015-is.html}
}
Ahmad I and Ranka S (2016), "Handbook of Energy-Aware and Green Computing - Two Volume Set" CRC Press.
Abstract: Implementing energy-efficient CPUs and peripherals as well as reducing resource consumption have become emerging trends in computing. As computers increase in speed and power, their energy issues become more and more prevalent. The need to develop and promote environmentally friendly computer technologies and systems has also come to the forefront in computing research. A pioneering publication for researchers in computer science and engineering, Handbook of Energy-Aware and Green Computing, Two-Volume Set is one of the first to present a comprehensive account of recent research in energy-aware and green computing. Edited by the co-chairs of the International Green Computing Conference, this handbook incorporates fundamental knowledge from all related areas, including circuit and component design, software, operating systems, networking, mobile computing, and data centers. It also discusses up-to-date research on many aspects of power-aware computing at the component, software, and system levels.
BibTeX:
@book{ahmad-handbook:2016,
  author = {Ahmad, Ishfaq and Ranka, Sanjay},
  title = {Handbook of Energy-Aware and Green Computing - Two Volume Set},
  publisher = {CRC Press},
  year = {2016}
}
Akhter N and Othman M (2016), "Energy Aware Resource Allocation of Cloud Data Center: Review and Open Issues", Cluster Computing. Vol. 19(3), pp. 1163-1182.
Abstract: The demand for cloud computing is increasing dramatically due to the high computational requirements of business, social, web and scientific applications. Nowadays, applications and services are hosted on the cloud in order to reduce the costs of hardware, software and maintenance. To satisfy this high demand, the number of large-scale data centers has increased, which consumes a high volume of electrical power, has a negative impact on the environment, and comes with high operational costs. In this paper, we discuss many ongoing or implemented energy aware resource allocation techniques for cloud environments. We also present a comprehensive review on the different energy aware resource allocation and selection algorithms for virtual machines in the cloud. Finally, we come up with further research issues and challenges for future cloud environments.
BibTeX:
@article{akhter-energy:2016,
  author = {Akhter, Nasrin and Othman, Mohamed},
  title = {Energy Aware Resource Allocation of Cloud Data Center: Review and Open Issues},
  journal = {Cluster Computing},
  year = {2016},
  volume = {19},
  number = {3},
  pages = {1163--1182},
  doi = {10.1007/s10586-016-0579-4}
}
Amoon M (2016), "Adaptive Framework for Reliable Cloud Computing Environment", IEEE Access. Vol. 4, pp. 9469-9478.
BibTeX:
@article{amoon-adaptive:2016,
  author = {Amoon, Mohammed},
  title = {Adaptive Framework for Reliable Cloud Computing Environment},
  journal = {IEEE Access},
  year = {2016},
  volume = {4},
  pages = {9469--9478},
  url = {http://ieeexplore.ieee.org/document/7742909/},
  doi = {10.1109/ACCESS.2016.2623633}
}
APC Schneider Electric (2016), "Calculating Total Cooling Requirement For Data Centers - Enterprise Control Systems". Thesis at: APC Schneider Electric.
BibTeX:
@techreport{apc-calculating:2016,
  author = {APC Schneider Electric},
  title = {Calculating Total Cooling Requirement For Data Centers - Enterprise Control Systems},
  school = {APC Schneider Electric},
  year = {2016},
  url = {http://www.datacenterexperts.com/resources/white-papers/cooling-containment-airflow-management/126-calculating-total-cooling-requirement-for-data-centers.html}
}
Aroca JA (2016), "Eficiencia Energética En Los Centros De Datos". Thesis at: Universitat Politècnica De València. València, España
Abstract: [es] With the rise of cloud computing, data centers have been called to play a leading role in today's Internet landscape. Despite this relevance, they are probably far from their peak, owing to the growing demand for storage and distribution of content in the cloud, the need for computing power, and the ever-increasing amounts of data being analyzed by major companies such as Google, Microsoft or Amazon. Having a data center involves two main issues: they are terribly expensive to build, and they consume enormous amounts of energy and are therefore very expensive to maintain. For this reason, reducing the construction cost and increasing the energy efficiency (and thus reducing the carbon footprint) of data centers has been one of the hottest research topics in recent years. This thesis proposes different techniques that can have an impact on the maintenance costs of data centers of any size, from small-scale facilities to large data centers.
BibTeX:
@mastersthesis{aroca-eficiencia:2016,
  author = {Aroca, Jorge Arjona},
  title = {Eficiencia Energética En Los Centros De Datos},
  school = {Universitat Politècnica De València},
  year = {2016}
}
ASHRAE-90 (2016), "ASHRAE Std 90.4 - Energy Standard for Data Centers | Engineering360". Thesis at: American Society of Heating, Refrigerating and Air-Conditioning Engineers (ASHRAE). USA (90.4), pp. 62.
Abstract: Scope: This standard applies to: a. new data centers, or portions thereof, and their systems; b. new additions to data centers, or portions thereof, and their systems; c. and modifications to systems and equipment in existing data centers or portions thereof. The provisions of this standard do not apply to a. telephone exchanges, b. essential facilities, and c. information technology equipment (ITE). Where specifically noted in this standard, certain other buildings or elements of buildings shall be exempt. This standard shall not be used to circumvent any safety, health, or environmental requirements. Purpose: The purpose of this standard is to establish the minimum energy efficiency requirements of data centers for a. design, construction, and a plan for operation and maintenance; and b. utilization of on-site or off-site renewable energy resources.
BibTeX:
@techreport{ashrae-90-ashrae:2016,
  author = {ASHRAE-90},
  title = {ASHRAE Std 90.4 - Energy Standard for Data Centers | Engineering360},
  school = {American Society of Heating, Refrigerating and Air-Conditioning Engineers (ASHRAE)},
  year = {2016},
  number = {90.4},
  pages = {62},
  url = {http://standards.globalspec.com/standards/detail?docid=10037704&familyid=YMFERFAAAAAAAAAA}
}
Bala M and Devanand (2016), "Virtual Machine Migration: A Green Computing Approach in Cloud Data Centers", In Proceedings of the International Congress on Information and Communication Technology. , pp. 161-168. Springer, Singapore.
Abstract: A recent fast growing development and demand in high performance computing has brought IT technocrats to the forefront to devise energy aware mechanisms so that CO2 emission can be reduced to a great extent. The resources in cloud data centers are always over provisioned in order to meet the peak workload. These resources consume a huge amount of energy, if used in their full capacity. By dynamically adopting the green computing policies as per current workload, the energy consumption of a cloud data center can be reduced. In the present study, the VM migration process has been discussed and the simulation driven results for evaluation of the proposed heuristic on the basis of static upper and lower limits allowed for CPU utilization have been presented. The comparative analysis of resource utilization and power consumption of a data center, with and without migration policy, reveals that a significant amount of power consumption can be reduced by VM migration and utilization of resources can be optimized.
BibTeX:
@incollection{bala-virtual:2016,
  author = {Bala, Minu and Devanand},
  title = {Virtual Machine Migration: A Green Computing Approach in Cloud Data Centers},
  booktitle = {Proceedings of the International Congress on Information and Communication Technology},
  publisher = {Springer, Singapore},
  year = {2016},
  pages = {161--168},
  note = {DOI: 10.1007/978-981-10-0755-2_18},
  url = {https://link.springer.com/chapter/10.1007/978-981-10-0755-2_18}
}
Beyer B, Petoff J, Jones C and Murphy NR (2016), "Site Reliability Engineering" Sebastopol, CA O'Reilly.
Abstract: The overwhelming majority of a software system's lifespan is spent in use, not in design or implementation. So, why does conventional wisdom insist that software engineers focus primarily on the design and development of large-scale computing systems? In this collection of essays and articles, key members of Google's Site Reliability Team explain how and why their commitment to the entire lifecycle has enabled the company to successfully build, deploy, monitor, and maintain some of the largest software systems in the world. You'll learn the principles and practices that enable Google engineers to make systems more scalable, reliable, and efficient--lessons directly applicable to your organization. This book is divided into four sections: Introduction--Learn what site reliability engineering is and why it differs from conventional IT industry practices; Principles--Examine the patterns, behaviors, and areas of concern that influence the work of a site reliability engineer (SRE); Practices--Understand the theory and practice of an SRE's day-to-day work: building and operating large distributed computing systems; Management--Explore Google's best practices for training, communication, and meetings that your organization can use.
BibTeX:
@book{beyer-site:2016,
  author = {Beyer, Betsy and Petoff, Jennifer and Jones, Chris and Murphy, Niall Richard},
  title = {Site Reliability Engineering},
  publisher = {O'Reilly},
  year = {2016},
  edition = {1st}
}
Bureau Veritas Certificação (2016), "Bureau Veritas Certificação"
BibTeX:
@online{bureau-veritas-certificacao-bureau:2016,
  author = {Bureau Veritas Certificação},
  title = {Bureau Veritas Certificação},
  year = {2016},
  url = {http://www.bureauveritascertification.com.br/}
}
Camargo DS and Miers CC (2016), "GreenHop: Open source PUE continuous monitoring for small and medium data centers", In 2016 XLII Latin American Computing Conference (CLEI). , pp. 1-12.
Abstract: This paper presents the GreenHop solution, focused on energy and environmental monitoring in small and medium size Data Centers (DCs). The solution enables the DC administrator to monitor the DC PUE (Power Usage Effectiveness) and maintain the environmental parameters in compliance with standards and good practice guidance. The solution is applied in a case study, showing the consumption of the cooling equipment when setting the DC operating temperature at 18°C, 23°C, 25°C, and free cooling. Thus, we identified an improvement in PUE and a reduction in power consumption as a result of the adoption of the GreenHop monitoring solution.
BibTeX:
@inproceedings{camargo-greenhop::2016,
  author = {Camargo, D. S. and Miers, C. C.},
  title = {GreenHop: Open source PUE continuous monitoring for small and medium data centers},
  booktitle = {2016 XLII Latin American Computing Conference (CLEI)},
  year = {2016},
  pages = {1--12},
  doi = {10.1109/CLEI.2016.7833381}
}
Camargo DS, Miers CC, Koslovski GP and Pillon MA (2016), "GreenHop: Open source environmental monitoring for small and medium data centers", In 2016 35th International Conference of the Chilean Computer Science Society (SCCC). , pp. 1-12.
Abstract: Several organizations carry out the execution of their systems and data processing services in local processing centers, i.e., data centers (DCs). However, small to medium-sized organizations usually do not have the technical and financial conditions to monitor the climatic conditions of their DC. This may result in excessive energy costs, a drastic reduction of the life span of the equipment, and the incorrect processing of data and systems. In this paper we present the open source based solution GreenHop, which aims to perform environmental monitoring of the DC server room. This paper presents four tests with different temperature settings, which also include the application of the free cooling method. We present a comparison of the energy impact of all settings. Thus, we aim to provide environmental monitoring of the DC server room while keeping the system customizable to implement and replicate.
BibTeX:
@inproceedings{camargo-greenhop::2016-1,
  author = {Camargo, D. S. and Miers, C. C. and Koslovski, G. P. and Pillon, M. A.},
  title = {GreenHop: Open source environmental monitoring for small and medium data centers},
  booktitle = {2016 35th International Conference of the Chilean Computer Science Society (SCCC)},
  year = {2016},
  pages = {1--12},
  doi = {10.1109/SCCC.2016.7836064}
}
Cascella GL, Cupertino F and Davide C (2016), "Energy Metering Optimization in Flour Mill Plants for ISO 50001 Implementation", In 2016 IEEE Workshop on Environmental, Energy, and Structural Monitoring Systems (EESMS). , pp. 1-5. IEEE.
Abstract: This paper proposes an innovative strategy to optimize the energy metering in large and energy-consuming plants such as industrial flour mills. The proposed solution deals with ISO 50001 implementation, which represents a critical challenge for many companies because the benefits due to improvements in energy management could be potentially canceled by the costs of an energy management system (EnMS). In particular, the key performance indexes (KPIs) monitoring is a crucial activity for several reasons: it is one of the early activities, it affects the measurement quality of KPIs and it deeply impacts the EnMS requirements and, consequently, the investment valuations. The proposed strategy supports the energy managers in the design of the energy monitoring system, suggesting the points of the electrical network to be equipped with sensors and monitored. Moreover, the paper describes the results carried out in a real-world application: the energy sensor network of a 1.2 MW flour mill plant sited in Italy has been designed and implemented with the proposed innovative solution.
BibTeX:
@inproceedings{cascella-energy:2016,
  author = {Cascella, G. L. and Cupertino, F. and Davide, C.},
  title = {Energy Metering Optimization in Flour Mill Plants for ISO 50001 Implementation},
  booktitle = {2016 IEEE Workshop on Environmental, Energy, and Structural Monitoring Systems (EESMS)},
  publisher = {IEEE},
  year = {2016},
  pages = {1--5},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7504810},
  doi = {10.1109/EESMS.2016.7504810}
}
Cherrafi A, Elfezazi S, Chiarini A, Mokhlis A and Benhida K (2016), "The Integration Of Lean Manufacturing, Six Sigma And Sustainability: A Literature Review And Future Research Directions For Developing A Specific Model", Journal Of Cleaner Production. Vol. 139, pp. 828-846.
Abstract: The purpose of this paper is to present a review and an analysis of the literature concerning a possible model for integrating three management systems: lean manufacturing, Six Sigma and sustainability. In particular, we analyzed current proposals and identified at the same time gaps in the existing literature, from which we suggested future research directions for developing a specific integrated model, suggesting new opportunities and challenges that should be addressed by future studies. Both academicians and practitioners will find our review useful because it outlines the major lines of research in the field and their limitations.
BibTeX:
@article{cherrafi-integration:2016,
  author = {Cherrafi, Anass and Elfezazi, Said and Chiarini, Andrea and Mokhlis, Ahmed and Benhida, Khalid},
  title = {The Integration Of Lean Manufacturing, Six Sigma And Sustainability: A Literature Review And Future Research Directions For Developing A Specific Model},
  journal = {Journal Of Cleaner Production},
  year = {2016},
  volume = {139},
  pages = {828--846},
  url = {http://www.sciencedirect.com/science/article/pii/S0959652616312495},
  doi = {10.1016/j.jclepro.2016.08.101}
}
Chugani N, Kumar V, Garza-reyes JA, Rocha-lona L and Upadhyay A (2016), "Investigating The Green Impact Of Lean, Six Sigma, And Lean Six Sigma: A Systematic Literature Review", International Journal Of Lean Six Sigma.
BibTeX:
@article{chugani-investigating:2016,
  author = {Chugani, Nashmi and Kumar, Vikas and Garza-Reyes, Jose Arturo and Rocha-Lona, Luis and Upadhyay, Arvind},
  title = {Investigating The Green Impact Of Lean, Six Sigma, And Lean Six Sigma: A Systematic Literature Review},
  journal = {International Journal Of Lean Six Sigma},
  year = {2016},
  url = {http://derby.openrepository.com/derby/handle/10545/605693}
}
Cisco (2016), "Calculating Power For Cisco Stackpower And The Cisco Catalyst 3750-x Series Switches White Paper"
Abstract: Thought leadership document designed to convince or influence customers to do business with Cisco based on a particular solution.
BibTeX:
@online{cisco-calculating:2016,
  author = {Cisco},
  title = {Calculating Power For Cisco Stackpower And The Cisco Catalyst 3750-x Series Switches White Paper},
  year = {2016},
  url = {http://www.cisco.com/c/en/us/products/collateral/switches/catalyst-3750-x-series-switches/white_paper_c11-663717.html}
}
Daimi K, Arabnia HR, Margaria-Steffen T and Solo AMG (2016), "SAM 2016: proceedings of the 2016 International Conference on Security & Management: WORLDCOMP '16, July 25-28, 2016, Las Vegas, Nevada, USA" CSREA Press.
BibTeX:
@book{daimi-sam:2016,
  author = {Daimi, Kevin and Arabnia, Hamid R. and Margaria-Steffen, Tiziana and Solo, Ashu M. G.},
  editor = {International Conference on Security and Management},
  title = {SAM 2016: proceedings of the 2016 International Conference on Security & Management: WORLDCOMP '16, July 25-28, 2016, Las Vegas, Nevada, USA},
  publisher = {CSREA Press},
  year = {2016}
}
National Renewable Energy Laboratory (2016), "NREL: Energy Analysis - Life Cycle Assessment Harmonization"
BibTeX:
@online{energy-nrel:2016,
  author = {National Renewable Energy Laboratory},
  editor = {National Renewable Energy Laboratory},
  title = {NREL: Energy Analysis - Life Cycle Assessment Harmonization},
  year = {2016},
  url = {http://www.nrel.gov/analysis/sustain_lcah.html}
}
Enokido T and Takizawa M (2016), "An Energy-Efficient Load Balancing Algorithm for Virtual Machine Environments to Perform Communication Type Application Processes", In 2016 IEEE 30th International Conference on Advanced Information Networking and Applications (AINA). , pp. 392-399.
Abstract: Scalable, high performance, and fault-tolerant distributed applications are realized with virtual machines in server cluster systems. Application processes are performed on virtual machines in each server. The processing load of virtual machines performing application processes has to be balanced in a server cluster system to satisfy the application requirements like response time. On the other hand, a server cluster system consumes a large amount of electric energy since multiple servers consume electric energy to perform application processes. It is critical to discuss how to reduce the total electric energy consumption of a server cluster to perform application processes on virtual machines. In our previous studies, the transmission model and power consumption model of a server to perform communication processes on multiple virtual machines are proposed. In this paper, we newly propose the transmission energy consumption laxity based (TECLB) algorithm to allocate communication processes to virtual machines in a server cluster based on the proposed transmission model and power consumption model of a server so that the total energy consumption of a server cluster can be reduced. We evaluate the TECLB algorithm in terms of the total energy consumption of a server cluster and transmission time of each process compared with the basic round-robin (RR) algorithm. The evaluation results show the average total energy consumption of a server cluster is maximally reduced to 9% of the RR algorithm.
BibTeX:
@inproceedings{enokido-energy-efficient:2016,
  author = {Enokido, T. and Takizawa, M.},
  title = {An Energy-Efficient Load Balancing Algorithm for Virtual Machine Environments to Perform Communication Type Application Processes},
  booktitle = {2016 IEEE 30th International Conference on Advanced Information Networking and Applications (AINA)},
  year = {2016},
  pages = {392--399},
  doi = {10.1109/AINA.2016.78}
}
(2016), "Principles of Performance and Reliability Modeling and Evaluation" Cham Springer International Publishing.
BibTeX:
@book{fiondella-principles:2016,
  editor = {Fiondella, Lance and Puliafito, Antonio},
  title = {Principles of Performance and Reliability Modeling and Evaluation},
  publisher = {Springer International Publishing},
  year = {2016},
  note = {DOI: 10.1007/978-3-319-30599-8},
  url = {http://link.springer.com/10.1007/978-3-319-30599-8}
}
Ghobaei-arani M, Jabbehdari S and Pourmina MA (2016), "An Autonomic Approach For Resource Provisioning Of Cloud Services", Cluster Comput. Vol. 19(3), pp. 1017-1036.
Abstract: Recently, there has been a significant increase in the use of cloud-based services that are offered in Software as a Service (SaaS) models by SaaS providers, and irregular access of different users to these cloud services leads to fluctuations in the demand workload. It is difficult to determine the suitable amount of resources required to run cloud services in response to the varying workloads, and this may lead to undesirable states of over-provisioning and under-provisioning. In this paper, we address improvements to resource provisioning for cloud services by proposing an autonomic resource provisioning approach that is based on the concept of the control monitor-analyze-plan-execute (MAPE) loop, and we design a resource provisioning framework for cloud environments. The experimental results show that the proposed approach reduces the total cost by up to 35%, reduces the number of service level agreement (SLA) violations by up to 40%, and increases the resource utilization by up to 25% compared with the other approaches.
BibTeX:
@article{ghobaei-arani-autonomic:2016,
  author = {Ghobaei-Arani, Mostafa and Jabbehdari, Sam and Pourmina, Mohammad Ali},
  title = {An Autonomic Approach For Resource Provisioning Of Cloud Services},
  journal = {Cluster Comput},
  year = {2016},
  volume = {19},
  number = {3},
  pages = {1017--1036},
  doi = {10.1007/s10586-016-0574-9}
}
Hameed A, Khoshkbarforoushha A, Ranjan R, Jayaraman PP, Kolodziej J, Balaji P, Zeadally S, Malluhi QM, Tziritas N, Vishnu A, Khan SU and Zomaya A (2016), "A Survey And Taxonomy On Energy Efficient Resource Allocation Techniques For Cloud Computing Systems", Computing. Vol. 98(7), pp. 751-774.
Abstract: In a cloud computing paradigm, energy efficient allocation of different virtualized ICT resources (servers, storage disks, and networks, and the like) is a complex problem due to the presence of heterogeneous application (e.g., content delivery networks, MapReduce, web applications, and the like) workloads having contentious allocation requirements in terms of ICT resource capacities (e.g., network bandwidth, processing speed, response time, etc.). Several recent papers have tried to address the issue of improving energy efficiency in allocating cloud resources to applications with varying degree of success. However, to the best of our knowledge there is no published literature on this subject that clearly articulates the research problem and provides a research taxonomy for succinct classification of existing techniques. Hence, the main aim of this paper is to identify open challenges associated with energy efficient resource allocation. In this regard, the study first outlines the problem and existing hardware and software-based techniques available for this purpose. Furthermore, available techniques already presented in the literature are summarized based on the energy-efficient research dimension taxonomy. The advantages and disadvantages of the existing techniques are comprehensively analyzed against the proposed research dimension taxonomy, namely: resource adaption policy, objective function, allocation method, allocation operation, and interoperability.
BibTeX:
@article{hameed-survey:2016,
  author = {Hameed, Abdul and Khoshkbarforoushha, Alireza and Ranjan, Rajiv and Jayaraman, Prem Prakash and Kolodziej, Joanna and Balaji, Pavan and Zeadally, Sherali and Malluhi, Qutaibah Marwan and Tziritas, Nikos and Vishnu, Abhinav and Khan, Samee U. and Zomaya, Albert},
  title = {A Survey And Taxonomy On Energy Efficient Resource Allocation Techniques For Cloud Computing Systems},
  journal = {Computing},
  year = {2016},
  volume = {98},
  number = {7},
  pages = {751--774},
  doi = {10.1007/s00607-014-0407-8}
}
He M, Pang S, Lavrov D, Lu D, Zhang Y and Sarrafzadeh A (2016), "Reverse Replication of Virtual Machines (rRVM) for Low Latency and High Availability Services", In 2016 IEEE/ACM 9th International Conference on Utility and Cloud Computing (UCC). , pp. 118-127.
Abstract: Virtualization supplies a straightforward approach to high availability through iterative replications of virtual machine (VM) checkpoints that encapsulate the protected services. Unfortunately, traditional VM replication solutions suffer from deficiencies in either response latency or state recovery consistency, which constrains the adoption of VM replication in production. In this paper, we extend the function of the secondary host to be the primary recipient of network requests so that the state of the primary VM (PVM) is retained by the secondary host in the form of network packets. In doing this, we redesign the typical consistency model and network architecture for virtual machine replication. Specifically, the secondary host is set for network redirection and packets recording. Should the primary host fail, the recorded packets are used to recreate the state on the secondary host. Experiments in this research demonstrate simultaneously strong recovery consistency and low response latency in our real-time fault tolerance system. We name the system reverse replication of virtual machines (rRVM).
BibTeX:
@inproceedings{he-reverse:2016,
  author = {He, M. and Pang, S. and Lavrov, D. and Lu, D. and Zhang, Y. and Sarrafzadeh, A.},
  title = {Reverse Replication of Virtual Machines (rRVM) for Low Latency and High Availability Services},
  booktitle = {2016 IEEE/ACM 9th International Conference on Utility and Cloud Computing (UCC)},
  year = {2016},
  pages = {118--127}
}
Iorga M and Karmel A (2016), "Cloud Computing Security Essentials and Architecture" Boca Raton CRC Press.
Abstract: This chapter discusses the essential security challenges and requirements for cloud consumers that intend to adopt cloud-based solutions for their information systems.
BibTeX:
@book{iorga-cloud:2016,
  author = {Iorga, Michaela and Karmel, Anil},
  editor = {Vacca, John R.},
  title = {Cloud Computing Security Essentials and Architecture},
  publisher = {CRC Press},
  year = {2016},
  edition = {1st, Chapter 3},
  url = {https://csrc.nist.gov/publications/detail/book/2016/cloud-computing-security-essentials-and-architecture}
}
ISO/IEC (2016), "ISO 50001 - Energy Management"
BibTeX:
@online{iso50001:2016,
  author = {ISO/IEC},
  editor = {ISO/IEC},
  title = {ISO 50001 - Energy Management},
  year = {2016},
  url = {http://www.iso.org/iso/home/standards/management-standards/iso50001.htm}
}
ISO/IEC (2016), "ISO/IEC 30134-2:2016 - Information Technology -- Data Centres -- Key Performance Indicators -- Part 2: Power Usage Effectiveness (PUE)".
BibTeX:
@misc{isoiec-30134-2:2016,
  author = {ISO/IEC},
  editor = {ISO/IEC},
  title = {ISO/IEC 30134-2:2016 - Information Technology -- Data Centres -- Key Performance Indicators -- Part 2: Power Usage Effectiveness (PUE)},
  year = {2016},
  url = {http://www.iso.org/iso/catalogue_detail.htm?csnumber=63451}
}
Jiang Y (2016), "A Survey Of Task Allocation And Load Balancing In Distributed Systems", IEEE Transactions On Parallel And Distributed Systems. Vol. 27(2), pp. 585-599.
Abstract: In past decades, significant attention has been devoted to task allocation and load balancing in distributed systems. Although there have been some related surveys about this subject, each of them only made a very preliminary review on the state of the art of one single type of distributed system. To correlate the studies in varying types of distributed systems and make a comprehensive taxonomy on them, this survey mainly categorizes and reviews the representative studies on task allocation and load balancing according to the general characteristics of varying distributed systems. First, this survey summarizes the general characteristics of distributed systems. Based on these general characteristics, this survey reviews the studies on task allocation and load balancing with respect to the following aspects: 1) typical control models; 2) typical resource optimization methods; 3) typical methods for achieving reliability; 4) typical coordination mechanisms among heterogeneous nodes; and 5) typical models considering network structures. For each aspect, we summarize the existing studies and discuss the future research directions. Through the survey, the related studies in this area can be well understood based on how they can satisfy the general characteristics of distributed systems.
BibTeX:
@article{jiang-survey:2016,
  author = {Jiang, Y.},
  title = {A Survey Of Task Allocation And Load Balancing In Distributed Systems},
  journal = {IEEE Transactions On Parallel And Distributed Systems},
  year = {2016},
  volume = {27},
  number = {2},
  pages = {585--599},
  doi = {10.1109/TPDS.2015.2407900}
}
Kommeri J, Niemi T and Nurminen JK (2016), "Energy efficiency of dynamic management of virtual cluster with heterogeneous hardware", The Journal of Supercomputing. , pp. 1-23.
Abstract: Cloud computing is an essential part of today's computing world. Continuously increasing amount of computation with varying resource requirements is placed in large data centers. The variation among computing tasks, both in their resource requirements and time of processing, makes it possible to optimize the usage of physical hardware by applying cloud technologies. In this work, we develop a prototype system for load-based management of virtual machines in an OpenStack computing cluster. Our prototype is based on an idea of `packing' idle virtual machines into special park servers optimized for this purpose. We evaluate the method by running real high-energy physics analysis software in an OpenStack test cluster and by simulating the same principle using the Cloudsim simulator software. The results show a clear improvement, 9–48 % , in the total energy efficiency when using our method together with resource overbooking and heterogeneous hardware.
BibTeX:
@article{kommeri-energy:2016,
  author = {Kommeri, Jukka and Niemi, Tapio and Nurminen, Jukka K.},
  title = {Energy efficiency of dynamic management of virtual cluster with heterogeneous hardware},
  journal = {The Journal of Supercomputing},
  year = {2016},
  pages = {1-23},
  url = {http://dx.doi.org/10.1007/s11227-016-1899-0},
  doi = {10.1007/s11227-016-1899-0}
}
Kong W and Luo Y (2016), "Multi-level image software assembly technology based on OpenStack and Ceph", In 2016 IEEE Information Technology, Networking, Electronic and Automation Control Conference. , pp. 307-310.
Abstract: With the development of virtualization technology, it is a prevailing trend that users deploy highly reliable, scalable, high efficiency application services via virtual machines. OpenStack, the hottest open-source virtual machine management platform, focuses on IaaS (Infrastructure as a Service), and is now expanding to PaaS (Platform as a Service) and SaaS (Software as a Service). As a mature IaaS cloud computing product, it must take the data replication and storage mechanism into consideration as a precondition, in order to ensure the integrity and continuity of the service provided to users. When it comes to storage, Ceph, a unified, highly reliable, high performance and scalable distributed file system, comes to mind for OpenStack, providing block storage, object storage and file storage. To solve the problem that OpenStack only supported single-level image management, which wastes a fairly large amount of storage space, we have studied the Ceph storage cluster integration architecture with OpenStack and implemented a multi-level image software assembly technology. In terms of storage space, the technology eliminates duplication of shared operating system image data and shared application software image data. In terms of time, users do not have to install the required application software manually, which saves time as well. Once virtual machine instances are launched, the required software is already installed.
BibTeX:
@inproceedings{kong-multi-level:2016,
  author = {Kong, W. and Luo, Y.},
  title = {Multi-level image software assembly technology based on OpenStack and Ceph},
  booktitle = {2016 IEEE Information Technology, Networking, Electronic and Automation Control Conference},
  year = {2016},
  pages = {307--310},
  doi = {10.1109/ITNEC.2016.7560371}
}
Kumar MRV and Raghunathan S (2016), "Heterogeneity And Thermal Aware Adaptive Heuristics For Energy Efficient Consolidation Of Virtual Machines In Infrastructure Clouds", Journal Of Computer And System Sciences. Vol. 82(2), pp. 191-212.
Abstract: Holistic datacenter energy minimization should consider interactions between computing and cooling source specific usage patterns. Decisions like workload type, server configuration, load, utilization, etc., contribute to power consumption, influence the datacenter's thermal profile and impact the energy required to control temperature within operational thresholds. In this paper, we present an adaptive virtual machine placement and consolidation approach to improve energy efficiency of a cloud datacenter, accounting for server heterogeneity, server processor low-power sleep state, state transition latency and integrated thermal controls to maintain the datacenter within operational temperature. Our proposed heuristic approach reduces energy consumption with an acceptable level of performance.
BibTeX:
@article{kumar-heterogeneity:2016,
  author = {Kumar, Mohan Raj Velayudhan and Raghunathan, Shriram},
  title = {Heterogeneity and thermal aware adaptive heuristics for energy efficient consolidation of virtual machines in infrastructure clouds},
  journal = {Journal of Computer and System Sciences},
  year = {2016},
  volume = {82},
  number = {2},
  pages = {191--212},
  url = {http://www.sciencedirect.com/science/article/pii/S002200001500080X},
  doi = {10.1016/j.jcss.2015.07.005}
}
Lee HC and Lin HH (2016), "Design and evaluation of an open-source wireless mesh networking module for environmental monitoring", IEEE Sensors Journal. Vol. 16(7), pp. 2162-2171.
Abstract: Wireless mesh networking extends the communication range among multiple cooperating low-power wireless radio transceivers and is useful for collecting data from sensors widely distributed over a large area. By integrating an off-the-shelf wireless design, such as the XBee module, development of sensor systems with mesh networking capability can be accelerated. This study introduces an open-source wireless mesh network (WMN) module, which integrates the functions of network discovery, automatic routing control, and transmission scheduling. In addition, this design is open source in order to promote the use of wireless mesh networking for environmental monitoring applications. Testing of the design and the proposed networking module is reported. The proposed wireless mesh networking module was evaluated and compared with XBee. In a 20-node experiment, the average packet delivery ratios of the proposed WMN module and the XBee were 94.09 and 91.19, with standard deviations of 5.14 and 10.25, respectively. The proposed system was demonstrated to have the advantages of low cost combined with high reliability and performance, and can aid scientists in implementing monitoring applications without the complications of complex wireless networking issues.
BibTeX:
@article{lee-design:2016,
  author = {Lee, H. C. and Lin, H. H.},
  title = {Design and evaluation of an open-source wireless mesh networking module for environmental monitoring},
  journal = {IEEE Sensors Journal},
  year = {2016},
  volume = {16},
  number = {7},
  pages = {2162--2171},
  doi = {10.1109/JSEN.2015.2507596}
}
Liu Q, Ma Y, Alhussein M, Zhang Y and Peng L (2016), "Green data center with IoT sensing and cloud-assisted smart temperature control system", Computer Networks. Vol. 101, pp. 104-112.
Abstract: With the growing shortage of energy around the world, energy efficiency is one of the most important considerations for a data center. In this paper, we propose a green data center air conditioning system assisted by cloud techniques, which consists of two subsystems: a data center air conditioning system and a cloud management platform. The data center air conditioning system includes environment monitoring, air conditioning, ventilation and temperature control, whereas the cloud platform provides data storage and analysis to support upper-layer applications. Moreover, the detailed design and implementation are presented, including the dispatch algorithm for the temperature control, the topological structure of the sensor network, and the framework for the environment monitoring node. A feasibility evaluation is used to verify that the proposed system can significantly reduce the data center energy consumption without degradation in the cooling performance.
BibTeX:
@article{liu-green:2016,
  author = {Liu, Qiang and Ma, Yujun and Alhussein, Musaed and Zhang, Yin and Peng, Limei},
  title = {Green data center with IoT sensing and cloud-assisted smart temperature control system},
  journal = {Computer Networks},
  year = {2016},
  volume = {101},
  pages = {104--112},
  url = {http://linkinghub.elsevier.com/retrieve/pii/S1389128615004739},
  doi = {10.1016/j.comnet.2015.11.024}
}
Maciel PRM (2016), "Modeling Availability Impact in Cloud Computing", In Principles of Performance and Reliability Modeling and Evaluation. , pp. 287-320. Springer, Cham.
Abstract: Internet-based services have become critical to several businesses, and many aspects of our lives depend on them (e.g., online banking, collaborative work, videoconferencing). Business continuity is a remarkable property and a chief concern for many companies, since service disruption may cause huge revenue and market share losses. In recent years, cloud computing has turned into a remarkable alternative due to its resource on-demand and pay-as-you-go models. More specifically, additional resources, such as virtual machines (VMs), are only allocated when a disaster takes place, and the automated virtual platform also performs a transparent recovery to minimize the time to restore service. This chapter presents availability models to evaluate cloud computing infrastructures.
BibTeX:
@incollection{maciel-modeling:2016,
  author = {Maciel, Paulo Romero Martins},
  title = {Modeling Availability Impact in Cloud Computing},
  booktitle = {Principles of Performance and Reliability Modeling and Evaluation},
  publisher = {Springer, Cham},
  year = {2016},
  pages = {287--320},
  note = {DOI: 10.1007/978-3-319-30599-8_11},
  url = {https://link.springer.com/chapter/10.1007/978-3-319-30599-8_11}
}
Mesbahi MR, Hashemi M and Rahmani AM (2016), "Performance evaluation and analysis of load balancing algorithms in cloud computing environments", In 2016 Second International Conference on Web Research (ICWR). , pp. 145-151.
Abstract: Distributing the system workload and balancing all incoming requests among all processing nodes in cloud computing environments is one of the important challenges in today's cloud computing world. Many load balancing algorithms and approaches have been proposed for distributed and cloud computing systems. In addition, the broker policy for distributing the workload among different datacenters in a cloud environment is one of the important factors for improving system performance. In this paper, we present an analytical comparison of combinations of VM load balancing algorithms and different broker policies. We evaluate these approaches by simulation on the CloudAnalyst simulator, and the final results are presented based on different parameters. The results of this research identify the best possible combinations.
BibTeX:
@inproceedings{mesbahi-performance:2016,
  author = {Mesbahi, M. R. and Hashemi, M. and Rahmani, A. M.},
  title = {Performance evaluation and analysis of load balancing algorithms in cloud computing environments},
  booktitle = {2016 Second International Conference on Web Research (ICWR)},
  year = {2016},
  pages = {145--151},
  doi = {10.1109/ICWR.2016.7498459}
}
Monil MAH and Rahman RM (2016), "VM consolidation approach based on heuristics, fuzzy logic, and migration control", Journal of Cloud Computing. Vol. 5(1), pp. 8.
Abstract: To meet the increasing demand for computational power, IT service providers should choose cloud-based services for their flexibility, reliability and scalability. More and more datacenters are being built to cater to customers' needs. However, the datacenters consume large amounts of energy, and this draws negative attention. To address these issues, researchers propose energy efficient algorithms that can minimize energy consumption while keeping the quality of service (QoS) at a satisfactory level. Virtual machine consolidation is one such technique to ensure an energy-QoS balance. In this research, we explore a fuzzy logic and heuristic based virtual machine consolidation approach to achieve energy-QoS balance. A fuzzy VM selection method is proposed in this research; it selects a VM from an overloaded host. Additionally, we incorporate migration control in the fuzzy VM selection method, which enhances the performance of the selection strategy. A new overload detection algorithm has also been proposed based on the mean, median and standard deviation of the utilization of VMs. We have used the CloudSim toolkit to simulate our experiment and evaluate the performance of the proposed algorithm on real-world workload traces of PlanetLab VMs. Simulation results demonstrate that the proposed method is the most energy efficient compared to others.
BibTeX:
@article{monil-vm:2016,
  author = {Monil, Mohammad Alaul Haque and Rahman, Rashedur M.},
  title = {VM consolidation approach based on heuristics, fuzzy logic, and migration control},
  journal = {Journal of Cloud Computing},
  year = {2016},
  volume = {5},
  number = {1},
  pages = {8},
  doi = {10.1186/s13677-016-0059-7}
}
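The abstract above describes an overload-detection rule built from the mean, median and standard deviation of VM utilizations. The following minimal Python sketch illustrates one such statistical threshold rule; the function name, the form of the threshold and the factor k are illustrative assumptions, not the authors' exact algorithm.
import statistics

def host_overloaded(vm_utilizations, k=1.0):
    # vm_utilizations: CPU utilization of each VM as a fraction of host capacity (0..1).
    # Illustrative rule in the spirit of the abstract: the allowed host utilization
    # shrinks when per-VM utilizations are volatile (large spread around mean/median).
    total = sum(vm_utilizations)
    mean = statistics.mean(vm_utilizations)
    median = statistics.median(vm_utilizations)
    spread = statistics.pstdev(vm_utilizations) + abs(mean - median)
    upper_threshold = max(0.0, 1.0 - k * spread)
    return total > upper_threshold

# Example: five VMs on one host with moderately volatile load.
print(host_overloaded([0.30, 0.25, 0.10, 0.20, 0.15], k=1.0))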
Mosa A and Paton NW (2016), "Optimizing VM placement for energy and SLA in clouds using utility functions", Journal of Cloud Computing. Vol. 5
Abstract: Cloud computing provides on-demand access to a shared pool of computing resources, which enables organizations to outsource their IT infrastructure. Cloud providers are building data centers to handle the continuous increase in cloud users' demands. Consequently, these cloud data centers consume, and have the potential to waste, substantial amounts of energy. This energy consumption increases the operational cost and the CO2 emissions. The goal of this paper is to develop an optimized energy and SLA-aware virtual machine (VM) placement strategy that dynamically assigns VMs to Physical Machines (PMs) in cloud data centers. This placement strategy co-optimizes energy consumption and service level agreement (SLA) violations. The proposed solution adopts utility functions to formulate the VM placement problem. A genetic algorithm searches the possible VMs-to-PMs assignments with a view to finding an assignment that maximizes utility. Simulation results using CloudSim show that the proposed utility-based approach reduced the average energy consumption by approximately 6 % and the overall SLA violations by more than 38 %, using fewer VM migrations and PM shutdowns, compared to a well-known heuristics-based approach.
BibTeX:
@article{mosa-optimizing:2016,
  author = {Mosa, Abdelkhalik and Paton, Norman W.},
  title = {Optimizing VM placement for energy and SLA in clouds using utility functions},
  journal = {Journal of Cloud Computing},
  year = {2016},
  volume = {5},
  doi = {10.1186/s13677-016-0067-7}
}
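The entry above formulates VM placement with a utility function that combines energy consumption and SLA violations, searched by a genetic algorithm. A minimal sketch of how such a utility could score one candidate assignment is given below; the function name, the linear weighted form, the weights and the normalisation constant are assumptions for illustration only.
def assignment_utility(power_watts, sla_violation_rate,
                       w_energy=0.5, w_sla=0.5,
                       max_power_watts=10000.0):
    # Illustrative utility for one candidate VMs-to-PMs assignment:
    # both objectives are normalised to [0, 1] and combined so that
    # lower power and fewer SLA violations yield a higher utility.
    energy_term = 1.0 - min(power_watts / max_power_watts, 1.0)
    sla_term = 1.0 - min(sla_violation_rate, 1.0)
    return w_energy * energy_term + w_sla * sla_term

# A genetic algorithm, as in the paper, would use such a utility as the fitness
# of each candidate assignment and keep the highest-scoring ones.
print(assignment_utility(power_watts=4200.0, sla_violation_rate=0.03))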
Newcombe L, Acton M, Bertoldi P, Booth J, Flucker S and Rouyer A (2016), "2016 Best Practice Guidelines for the EU Code of Conduct on Data Centre Energy Efficiency". Thesis at: European Commission. Joint Research Centre (Version 7.1.2), pp. 50.
Abstract: This Code of Conduct is a voluntary initiative aimed at bringing interested stakeholders together, including the coordination of other similar activities by manufacturers, vendors, consultants and utilities. Parties signing up will be expected to follow the intent of this Code of Conduct and abide by a set of agreed commitments.
BibTeX:
@report{newcombe-2016:2016,
  author = {Newcombe, Liam and Acton, Mark and Bertoldi, Paolo and Booth, John and Flucker, Sophia and Rouyer, Andre},
  title = {2016 Best Practice Guidelines for the EU Code of Conduct on Data Centre Energy Efficiency},
  school = {European Commission},
  year = {2016},
  number = {Version 7.1.2},
  pages = {50},
  url = {http://iet.jrc.ec.europa.eu/energyefficiency/ict-codes-conduct/data-centres-energy-efficiency}
}
Ngenzi A, Selvarani R and Suchithra R (2016), "Framework of Resource Management using Server Consolidation to Minimize Live Migration and Load Balancing", Framework. Vol. 7(11)
BibTeX:
@article{ngenzi-framework:2016,
  author = {Ngenzi, Alexander and Selvarani, R. and Suchithra, R.},
  title = {Framework of Resource Management using Server Consolidation to Minimize Live Migration and Load Balancing},
  journal = {Framework},
  year = {2016},
  volume = {7},
  number = {11},
  url = {https://www.researchgate.net/profile/Alexander_Ngenzi/publication/311385728_Framework_of_Resource_Management_using_Server_Consolidation_to_Minimize_Live_Migration_and_Load_Balancing/links/5843c4e208aeda696815c64e.pdf}
}
Padoin EL (2016), "Energy-aware load balancing approaches to improve energy efficiency on HPC systems". Thesis at: UFRGS. Porto Alegre RS
BibTeX:
@phdthesis{padoin-energy-aware:2016,
  author = {Padoin, Edson Luiz},
  title = {Energy-aware load balancing approaches to improve energy efficiency on HPC systems},
  school = {UFRGS},
  year = {2016},
  url = {http://www.lume.ufrgs.br/handle/10183/140401}
}
Patel SD and Vaghela D (2016), "A survey paper on load balancing algorithms for resource management in virtualization", International Journal for Innovative Research in Science and Technology. Vol. 2(11), pp. 68-70.
BibTeX:
@article{patel-survey:2016,
  author = {Patel, Suruchi Dilipbhai and Vaghela, Dinesh},
  title = {A survey paper on load balancing algorithms for resource management in virtualization},
  journal = {International Journal for Innovative Research in Science and Technology},
  year = {2016},
  volume = {2},
  number = {11},
  pages = {68--70},
  url = {http://www.ijirst.org/Article.php?manuscript=IJIRSTV2I11013}
}
Pegus II P, Varghese B, Guo T, Irwin D, Shenoy P, Mahanti A, Culbert J, Goodhue J and Hill C (2016), "Analyzing the Efficiency of a Green University Data Center", In 7th ACM/SPEC. New York, NY, USA. ACM.
Abstract: Data centers are an indispensable part of today's IT infrastructure. To keep pace with modern computing needs, data centers continue to grow in scale and consume increasing amounts of power. While prior work on data centers has led to significant improvements in their energy efficiency, detailed measurements from these facilities' operations are not widely available, as data center design is often considered part of a company's competitive advantage. However, such detailed measurements are critical to the research community in motivating and evaluating new energy-efficiency optimizations. In this paper, we present a detailed analysis of a state-of-the-art 15 MW green multi-tenant data center that incorporates many of the technological advances used in commercial data centers. We analyze the data center's computing load and its impact on power, water, and carbon usage using standard effectiveness metrics, including PUE, WUE, and CUE. Our results reveal the benefits of optimizations, such as free cooling, and provide insights into how the various effectiveness metrics change with the seasons and increasing capacity usage. More broadly, our PUE, WUE, and CUE analysis validates the green design of this LEED Platinum data center.
BibTeX:
@inproceedings{pegus-analyzing:2016,
  author = {Pegus II, Patrick and Varghese, Benoy and Guo, Tian and Irwin, David and Shenoy, Prashant and Mahanti, Anirban and Culbert, James and Goodhue, John and Hill, Chris},
  title = {Analyzing The Efficiency Of A Green University Data Center},
  booktitle = {7th ACM/SPEC},
  publisher = {ACM},
  year = {2016},
  doi = {10.1145/2851553.2851557}
}
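The effectiveness metrics named in the abstract above (PUE, WUE, CUE) are simple ratios of facility-level quantities to IT energy. The sketch below shows the commonly used definitions; the example figures are made up solely to show the calculation and are not taken from the paper.
def pue(total_facility_kwh, it_kwh):
    # Power Usage Effectiveness: total facility energy per unit of IT energy (ideal value: 1.0).
    return total_facility_kwh / it_kwh

def wue(water_liters, it_kwh):
    # Water Usage Effectiveness: litres of water consumed per kWh of IT energy.
    return water_liters / it_kwh

def cue(co2_kg, it_kwh):
    # Carbon Usage Effectiveness: kg of CO2-equivalent emitted per kWh of IT energy.
    return co2_kg / it_kwh

# Made-up monthly figures, only to illustrate the ratios.
print(pue(1_300_000, 1_000_000), wue(450_000, 1_000_000), cue(350_000, 1_000_000))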
Nkenyereye L and Jang J-w (2016), "A Remote System for Monitoring Auxiliary Data Center from Environmental Threats with Lower Hardware Cost", 7th EUSPN 2016 / 6th ICTH-2016. Vol. 98, pp. 187-192.
Abstract: Some data centers are located far from the main data center to ensure continuity of a company's Information Technology operations in case the main data center encounters serious downtime, especially for companies that work in a distributed manner to increase quality of service to their customers. Environmental downtime is a significant cost to organizations and makes them unable to do business, because what happens in the data center affects everyone. In addition, the amount of electrical energy consumed by a data center increases with the amount of computing power installed. One strategy for reducing energy consumption in data centers is to adjust temperature and humidity. Installing physical Information Technology and environment-related facilities to monitor temperature, humidity, power, flood, smoke, air flow and room entry is the most proactive way to reduce the unnecessary costs of expensive hardware replacement or unplanned downtime and to decrease the energy consumed by servers. In this paper, we present a remote system for monitoring auxiliary data centers implemented using open-source hardware platforms: Arduino, Raspberry Pi, and Gobetwino. Collecting temperature and humidity data allows monitoring the servers' health and getting alerts if things start to go wrong. When the temperature hits 50 °C, the supervisor at the remote headquarters receives an SMS and can take appropriate actions to reduce electrical costs and preserve the functionality of servers in the auxiliary data centers. The data center environmental monitoring represents a step towards awareness monitoring of energy efficiency in remotely located data centers, with the ability to receive a notification by email.
BibTeX:
@article{remote:2016,
  author = {Nkenyereye, Lionel and Jang, Jong-wook},
  title = {A Remote System for Monitoring Auxiliary Data Center from Environmental Threats with Lower Hardware Cost},
  journal = {7th EUSPN 2016 / 6th ICTH-2016},
  year = {2016},
  volume = {98},
  pages = {187--192},
  doi = {10.1016/j.procs.2016.09.030}
}
Riekstin AC et al. (2016), "Orchestration of Energy Efficiency Capabilities in Networks", Journal of Network and Computer Applications. Vol. 59, pp. 74-87.
BibTeX:
@article{riekstin-orchestration_resumed:2016,
  author = {Riekstin, A. C. et al.},
  title = {Orchestration of Energy Efficiency Capabilities in Networks},
  journal = {Journal of Network and Computer Applications},
  year = {2016}
}
Riekstin AC, Januário GC, Rodrigues BB, Nascimento VT, Carvalho TC and Meirosu C (2016), "Orchestration of Energy Efficiency Capabilities in Networks", Journal of Network and Computer Applications. Vol. 59, pp. 74-87.
Abstract: The energy demand for operating information and communication technology (ICT) systems has been growing, implying high operational costs and a consequent increase in carbon emissions. Both in datacenters and in telecom infrastructures, the networks represent a significant share of the energy spending. Given that, there is an increased demand for energy efficiency solutions, and several capabilities to save energy have been proposed. However, it is very difficult to orchestrate such energy efficiency capabilities, that is, to coordinate or combine them in the same network, ensuring a conflict-free operation and choosing the best one for a given scenario, so that a capability not suited to the current bandwidth utilization is not applied and does not lead to congestion or packet loss. Neither is there a way to do this taking business directives into account. In this regard, a method able to orchestrate different energy efficiency capabilities is proposed, considering the possible combinations and conflicts among them, as well as the best option for given workload and network characteristics. Business policies are refined down to the network level in order to bring high-level directives into the operation, and a utility function is used to combine energy efficiency and performance requirements. A decision tree able to determine what to do in each scenario is deployed in a software defined network environment. The proposed method was validated with different experiments, testing the utility function, checking the extra savings when combining several capabilities, the decision tree interpolation, and dynamicity aspects. The orchestration proved valid to solve the problem of finding the best combination for a given scenario, achieving additional savings due to the combination, besides ensuring a conflict-free operation. Highlights: A method able to orchestrate different energy efficiency capabilities is proposed. It considers the possible combinations and conflicts among the capabilities. It refines business policies to bring high-level directives into the operation. The method solves the problem of finding the best combination for a given scenario, and achieves additional savings, besides ensuring a conflict-free operation.
BibTeX:
@article{riekstin-orchestration:2016,
  author = {Riekstin, Ana C. and Januário, Guilherme C. and Rodrigues, Bruno B. and Nascimento, Viviane T. and Carvalho, Tereza C. M. B. and Meirosu, Catalin},
  title = {Orchestration of Energy Efficiency Capabilities in Networks},
  journal = {Journal of Network and Computer Applications},
  year = {2016},
  volume = {59},
  pages = {74--87},
  doi = {10.1016/j.jnca.2015.06.015}
}
Sartor D (2016), "Better Buildings, Better Data Centers: Applying Best Practices".
BibTeX:
@misc{sartor-better:2016,
  author = {Sartor, Dale},
  title = {Better Buildings, Better Data Centers: Applying Best Practices},
  year = {2016},
  note = {Rocky Mountain Green 2016},
  url = {https://datacenters.lbl.gov/resources/better-buildings-better-data-centers-0}
}
Sciammarella T, Couto RS, Rubinstein MG, Campista MEM and Costa LHM (2016), "Uma Análise do Tráfego de Controle de uma Nuvem IaaS Geodistribuída"
BibTeX:
@article{sciammarella-uma:2016,
  author = {Sciammarella, Tatiana and Couto, Rodrigo S and Rubinstein, Marcelo G and Campista, Miguel Elias M and Costa, Luís Henrique MK},
  title = {Uma Análise do Tráfego de Controle de uma Nuvem IaaS Geodistribuída},
  year = {2016}
}
SGS Group (2016), "SGS Group"
Abstract: SGS is the world leader in inspection, verification, testing and certification. We are recognized as the global benchmark for quality and integrity.
BibTeX:
@online{sgs-group-sgs:2016,
  author = {SGS Group},
  title = {SGS Group},
  year = {2016},
  url = {http://www.sgsgroup.com.br/}
}
Shehabi A (2016), "United States Data Center Energy Usage Report". Thesis at: United States Department of Energy.
Abstract: This report estimates historical data center electricity consumption back to 2000, relying on previous studies and historical shipment data, and forecasts consumption out to 2020 based on new trends and the most recent data available. In 2014, data centers in the U.S. consumed an estimated 70 billion kWh, representing about 1.8% of total U.S. electricity consumption. This report shows that data center electricity consumption increased by about 4% from 2010-2014, a large shift from the 24% increase estimated from 2005-2010 and the nearly 90% increase estimated from 2000-2005. Energy use is expected to continue increasing slightly in the near future, growing 4% from 2014-2020, the same rate as over the past five years. Based on current trend estimates, U.S. data centers are projected to consume approximately 73 billion kWh in 2020. A combination of efficiency trends has resulted in a relatively steady U.S. data center electricity demand over the past 5 years, with little growth expected for the remainder of this decade. Beyond the energy efficiency gains already achieved, there are additional energy efficiency strategies and technologies that could significantly reduce data center electricity use below the approximately 73 billion kWh demand projected for 2020. Many of these efficiency strategies are already successfully employed in some data centers, while others are emerging technologies that will be commercially available in the near future. The potential impact of adopting additional energy efficiency strategies is explored, estimating annual savings in 2020 of up to 33 billion kWh, representing a 45% reduction in electricity demand when compared to current efficiency trends.
BibTeX:
@report{shehabi-united:2016,
  author = {Shehabi, Arman},
  title = {United States Data Center Energy Usage Report},
  school = {United States Department of Energy},
  year = {2016}
}
Xiong H, Fowley F and Pahl C (2016), "A database-specific pattern for multi-cloud high availability and disaster recovery", Communications in Computer and Information Science. Vol. 567, pp. 374-388.
Abstract: High availability and disaster recovery (HADR) are often discussed in highly critical business systems for business function recovery and continuity concerns. With the development of cloud computing, virtual cloud services are perfectly matched to HADR scenarios, and interoperability is a significant aspect to help users to use HADR service across different cloud platforms and providers. In this paper, we present an architectural pattern describing the integration of high availability and disaster recovery. We focus on database cluster replication between private cloud and public cloud environments. This HADR pattern for database cluster replication implements both synchronous and asynchronous replication concurrently for high availability and disaster recovery purposes. To evaluate the effectiveness of this pattern, we simulate a MySQL-database-cluster HADR scenario under three strategies: hot standby, warm standby and cold standby, and analyze the performance, business continuity features and cost. © Springer International Publishing Switzerland 2016.
BibTeX:
@article{xiong-database:2016,
  author = {Xiong, H. and Fowley, F. and Pahl, C.},
  title = {A database-specific pattern for multi-cloud high availability and disaster recovery},
  journal = {Communications in Computer and Information Science},
  year = {2016},
  volume = {567},
  pages = {374--388},
  doi = {10.1007/978-3-319-33313-7_29}
}
Xu M, Tian W and Buyya R (2016), "A Survey on Load Balancing Algorithms for VM Placement in Cloud Computing", Distributed, Parallel, and Cluster Computing (cs.DC).
Abstract: The emergence of cloud computing based on virtualization technologies brings huge opportunities to host virtual resources at low cost without the need to own any infrastructure. Virtualization technologies enable users to acquire, configure and be charged on a pay-per-use basis. However, cloud data centers mostly comprise heterogeneous commodity servers hosting multiple virtual machines (VMs) with potentially varied specifications and fluctuating resource usages, which may cause imbalanced resource utilization within servers and lead to performance degradation and violations of service level agreements (SLAs). To achieve efficient scheduling, these challenges should be addressed and solved by using load balancing strategies, which has been proved to be an NP-hard problem. From multiple perspectives, this work identifies the challenges and analyzes existing algorithms for allocating VMs to PMs in infrastructure clouds, with a special focus on load balancing. A detailed classification targeting load balancing algorithms for VM placement in cloud data centers is investigated, and the surveyed algorithms are classified according to this classification. The goal of this paper is to provide a comprehensive and comparative understanding of the existing literature and to aid researchers by providing an insight into potential future enhancements.
BibTeX:
@article{xu-survey:2016,
  author = {Xu, Minxian and Tian, Wenhong and Buyya, Rajkumar},
  title = {A Survey on Load Balancing Algorithms for VM Placement in Cloud Computing},
  journal = {Distributed, Parallel, and Cluster Computing (cs.DC)},
  year = {2016},
  url = {http://arxiv.org/abs/1607.06269}
}
Zhou A, Wang S, Cheng B, Zheng Z, Yang F, Chang R, Lyu M and Buyya R (2016), "Cloud service reliability enhancement via virtual machine placement optimization", IEEE Transactions on Services Computing.
BibTeX:
@article{zhou-cloud:2016,
  author = {Zhou, Ao and Wang, Shangguang and Cheng, Bo and Zheng, Zibin and Yang, Fangchun and Chang, Rong and Lyu, Michael and Buyya, Rajkumar},
  title = {Cloud service reliability enhancement via virtual machine placement optimization},
  journal = {IEEE Transactions on Services Computing},
  year = {2016},
  url = {http://ieeexplore.ieee.org/abstract/document/7387769/}
}
Guyon D, Orgerie A-C and Morin C (2015), "Energy-Efficient User-Oriented Cloud Elasticity for Data-Driven Applications", December, 2015. , pp. 376-383. IEEE.
BibTeX:
@inproceedings{guyon-energy-efficient:2015,
  author = {Guyon, David and Orgerie, Anne-Cecile and Morin, Christine},
  title = {Energy-Efficient User-Oriented Cloud Elasticity for Data-Driven Applications},
  publisher = {IEEE},
  year = {2015},
  pages = {376--383},
  url = {http://ieeexplore.ieee.org/document/7396529/},
  doi = {10.1109/DSDIS.2015.57}
}
Ihara D, Lopez-Pires F and Baran B (2015), "Many-Objective Virtual Machine Placement for Dynamic Environments", December, 2015.
Abstract: This paper presents for the first time a formulation of the Virtual Machine Placement as a Many-Objective problem (MaVMP), considering the simultaneous optimization of the following five objective functions for dynamic environments: (1) power consumption, (2) inter-VM network traffic, (3) economical revenue, (4) number of VM migrations and (5) network traffic overhead for VM migrations. To solve the formulated MaVMP problem, a novel Memetic Algorithm is proposed. As a potentially large number of feasible solutions at any time is one of the challenges of MaVMP, five selection strategies are evaluated in order to automatically select one solution at each time. The proposed algorithm with the considered selection strategies was evaluated in two different scenarios.
BibTeX:
@inproceedings{ihara-many-objective:2015,
  author = {Ihara, Diego and Lopez-Pires, Fabio and Baran, Benjamin},
  title = {Many-Objective Virtual Machine Placement for Dynamic Environments},
  year = {2015}
}
Persico V, Marchetta P, Botta A and Pescapè A (2015), "Measuring Network Throughput in the Cloud", Comput. Netw.., December, 2015. Vol. 93(P3), pp. 408-422.
Abstract: Cloud providers employ sophisticated virtualization techniques and strategies for sharing resources among a large number of largely uncoordinated and mutually untrusted customers. The shared networking environment, in particular, dictates the need for mechanisms to partition network resources among virtual machines. At the same time, the performance of applications deployed over these virtual machines may be heavily impacted by the performance of the underlying network, and therefore by such mechanisms. Nevertheless, due to security and commercial reasons, providers rarely provide detailed information on network organization, performance, and mechanisms employed to regulate it. In addition, the scientific literature only provides a blurred image of the network performance inside the cloud. The few available pioneer works marginally focus on this aspect, use different methodologies, operate in few limited scenarios, or report conflicting results. In this paper, we present a detailed analysis of the performance of the internal network of Amazon EC2, performed by adopting a non-cooperative experimental evaluation approach (i.e., not relying on provider support). Our aim is to provide a quantitative assessment of the networking performance as a function of the several variables available, such as geographic region, resource price or size. We propose a detailed methodology to perform this kind of analysis, which we believe is essential in such a complex and dynamic environment. During this analysis we have discovered and analyzed the limitations enforced by Amazon over customer traffic in terms of maximum throughput allowed. Thanks to our work it is possible to understand how the complex mechanisms enforced by the provider in order to manage its infrastructure impact the performance perceived by the cloud customers and potentially tamper with monitoring and controlling approaches previously proposed in literature. Leveraging our knowledge of the bandwidth-limiting mechanisms, we then present a clear picture of the maximum throughput achievable in the Amazon EC2 network, shedding light on when and how such maximum throughput can be achieved and at which cost.
BibTeX:
@article{persico-measuring:2015,
  author = {Persico, Valerio and Marchetta, Pietro and Botta, Alessio and Pescapè, Antonio},
  title = {Measuring Network Throughput in the Cloud},
  journal = {Comput. Netw.},
  year = {2015},
  volume = {93},
  number = {P3},
  pages = {408--422},
  url = {http://dx.doi.org/10.1016/j.comnet.2015.09.037},
  doi = {10.1016/j.comnet.2015.09.037}
}
Riasetiawan M, Ashari A and Endrayanto I (2015), "Distributed Replicated Block Device (DRDB) implementation on cluster storage data migration", In 2015 International Conference on Data and Software Engineering (ICoDSE)., November, 2015. , pp. 93-97.
Abstract: Data center systems are required to have high availability in order to meet the continuing needs of users. If a server or application fails or requires maintenance, its virtual machines are migrated to another server in the cluster that is still available. The role of shared storage is very important here, keeping a virtual machine able to continue its work without losing data on the destination server. This study implements virtualization on a server cluster system using virtual machines on Proxmox VE and Distributed Replicated Block Device (DRBD) as shared storage. The implementation uses two nodes, and a comparison is made with two other nodes in a cluster system that does not use shared storage. Shared storage works by synchronizing and replicating virtual machine data, which can then be migrated online. The use of shared storage affects virtual machine performance, especially the disk speed during the process of sending and receiving data, and the availability of services. Downtime during migration is measured to test the success of the system. TCP connection tests are made to verify the network throughput, and the connection test results are compared with disk performance tests.
BibTeX:
@inproceedings{riasetiawan-distributed:2015,
  author = {Riasetiawan, M. and Ashari, A. and Endrayanto, I.},
  title = {Distributed Replicated Block Device (DRDB) implementation on cluster storage data migration},
  booktitle = {2015 International Conference on Data and Software Engineering (ICoDSE)},
  year = {2015},
  pages = {93--97},
  doi = {10.1109/ICODSE.2015.7436978}
}
Zhang X, Gaddam S and Chronopoulos AT (2015), "Ceph Distributed File System Benchmarks on an Openstack Cloud", In 2015 IEEE International Conference on Cloud Computing in Emerging Markets (CCEM)., November, 2015. , pp. 113-120.
Abstract: Ceph is a distributed file system that provides high performance, reliability, and scalability. Ceph maximizes the separation between data and metadata management by replacing allocation tables with a pseudo-random data distribution function (CRUSH) designed for heterogeneous and dynamic clusters of unreliable object storage devices (OSDs). In this paper, we investigate the performance of Ceph on an OpenStack cloud using well-known benchmarks. Our results show its good performance and scalability.
BibTeX:
@inproceedings{zhang-ceph:2015,
  author = {Zhang, X. and Gaddam, S. and Chronopoulos, A. T.},
  title = {Ceph Distributed File System Benchmarks on an Openstack Cloud},
  booktitle = {2015 IEEE International Conference on Cloud Computing in Emerging Markets (CCEM)},
  year = {2015},
  pages = {113--120},
  doi = {10.1109/CCEM.2015.12}
}
Ranjan R, Benatallah B, Dustdar S and Papazoglou MP (2015), "Cloud Resource Orchestration Programming: Overview, Issues, and Directions", IEEE Internet Computing., September, 2015. Vol. 19(5), pp. 46-56.
Abstract: Cloud computing provides on-demand access to affordable hardware (such as multicore CPUs, GPUs, disk drives, and networking equipment) and software (databases, application servers, load-balancers, data processors, and frameworks). The pervasiveness and power of cloud computing alleviates some of the problems that application administrators face in their existing hardware and locally managed software environments. However, the rapid increase in scale, dynamicity, heterogeneity, and diversity of cloud resources necessitates having expert knowledge about programming complex orchestration operations (for example, selection, deployment, monitoring, and runtime control) on those resources to achieve the desired quality of service. This article provides an overview of the key cloud resource types and resource orchestration operations, with special focus on research issues involved in programming those operations.
BibTeX:
@article{ranjan-cloud:2015,
  author = {Ranjan, R. and Benatallah, B. and Dustdar, S. and Papazoglou, M. P.},
  title = {Cloud Resource Orchestration Programming: Overview, Issues, and Directions},
  journal = {IEEE Internet Computing},
  year = {2015},
  volume = {19},
  number = {5},
  pages = {46--56},
  doi = {10.1109/MIC.2015.20}
}
Villamizar M, Garcés O, Castro H, Verano M, Salamanca L, Casallas R and Gil S (2015), "Evaluating the monolithic and the microservice architecture pattern to deploy web applications in the cloud", In 2015 10th Computing Colombian Conference (10CCC)., September, 2015. , pp. 583-590.
Abstract: Cloud computing provides new opportunities to deploy scalable application in an efficient way, allowing enterprise applications to dynamically adjust their computing resources on demand. In this paper we analyze and test the microservice architecture pattern, used during the last years by large Internet companies like Amazon, Netflix and LinkedIn to deploy large applications in the cloud as a set of small services that can be developed, tested, deployed, scaled, operated and upgraded independently, allowing these companies to gain agility, reduce complexity and scale their applications in the cloud in a more efficient way. We present a case study where an enterprise application was developed and deployed in the cloud using a monolithic approach and a microservice architecture using the Play web framework. We show the results of performance tests executed on both applications, and we describe the benefits and challenges that existing enterprises can get and face when they implement microservices in their applications.
BibTeX:
@inproceedings{villamizar-evaluating:2015,
  author = {Villamizar, M. and Garcés, O. and Castro, H. and Verano, M. and Salamanca, L. and Casallas, R. and Gil, S.},
  title = {Evaluating the monolithic and the microservice architecture pattern to deploy web applications in the cloud},
  booktitle = {2015 10th Computing Colombian Conference (10CCC)},
  year = {2015},
  pages = {583--590},
  doi = {10.1109/ColumbianCC.2015.7333476}
}
Lopez-Pires F and Baran B (2015), "Virtual Machine Placement Literature Review", arXiv:1506.01509 [cs]., June, 2015.
Abstract: Cloud Computing Datacenters host millions of virtual machines (VMs) on real world scenarios. In this context, Virtual Machine Placement (VMP) is one of the most challenging problems in cloud infrastructure management, considering also the large number of possible optimization criteria and different formulations that could be studied. VMP literature include relevant topics such as energy-efficiency, Service Level Agreements (SLA), cloud service markets, Quality of Service (QoS) and carbon dioxide emissions, all of them with high economical and ecological impact. This work presents an extensive up-to-date review of the most relevant VMP literature in order to identify research opportunities.
BibTeX:
@article{lopez-pires-virtual:2015,
  author = {Lopez-Pires, Fabio and Baran, Benjamin},
  title = {Virtual Machine Placement Literature Review},
  journal = {arXiv:1506.01509 [cs]},
  year = {2015},
  note = {arXiv: 1506.01509},
  url = {http://arxiv.org/abs/1506.01509}
}
Machado J-P and Forina M (2015), "Operational energy efficiency for users (OEU); Referential specification to define sustainable levels of ICT sites". Thesis at: ETSI. France, June, 2015. (ETSI GS OEU 006), pp. 13.
BibTeX:
@report{machado-operational:2015,
  author = {Machado, Jose-Pedro and Forina, Marlène},
  title = {Operational energy efficiency for users (OEU); Referential specification to define sustainable levels of ICT sites},
  school = {ETSI},
  year = {2015},
  number = {ETSI GS OEU 006},
  pages = {13}
}
Khan SU and Zomaya AY (2015), "Handbook on Data Centers", May, 2015. Springer.
Abstract: This handbook offers a comprehensive review of the state-of-the-art research achievements in the field of data centers. Contributions from international, leading researchers and scholars offer topics in cloud computing, virtualization in data centers, energy efficient data centers, and next generation data center architecture. It also comprises current research trends in emerging areas, such as data security, data protection management, and network resource management in data centers. Specific attention is devoted to industry needs associated with the challenges faced by data centers, such as various power, cooling, floor space, and associated environmental health and safety issues, while still working to support growth without disrupting quality of service. The contributions cut across various IT data technology domains as a single source to discuss the interdependencies that need to be supported to enable a virtualized, next-generation, energy efficient, economical, and environmentally friendly data center. This book appeals to a broad spectrum of readers, including server, storage, networking, database, and applications analysts, administrators, and architects. It is intended for those seeking to gain a stronger grasp on data center networks: the fundamental protocol used by the applications and the network, the typical network technologies, and their design aspects. The Handbook on Data Centers is a leading reference on design and implementation for planning, implementing, and operating data center networks.
BibTeX:
@book{khan-handbook:2015,
  author = {Khan, Samee Ullah and Zomaya, Albert Y.},
  title = {Handbook on Data Centers},
  publisher = {Springer},
  year = {2015}
}
Silva FC (2015), "E-book - Data Centers (CPD) Para Pequenas e Médias Empresas". Thesis at: Redes&cia., April, 2015. , pp. 39.
Abstract: Presents the main concepts needed for a good data center design for small and medium-sized companies, also known as a secure room or CPD.
BibTeX:
@techreport{carvalho-ebook-redesecia:2015,
  author = {Silva, Fabrício Carvalho},
  title = {E-book - Data Centers (CPD) Para Pequenas e Médias Empresas},
  school = {Redes&cia},
  year = {2015},
  pages = {39}
}
Sato M, Matsunaga A, Chiba M, Shoujiguchi A and Yoshikawa M (2015), "Seeking an energy-efficient modular data center: Impact of pressure loss on the server fan power", In 2015 International Conference on Electronics Packaging and iMAPS All Asia Conference (ICEP-IAAC)., April, 2015. , pp. 617-622.
Abstract: With the recent development of cloud computing, the amount of information to be processed in data centers has increased in recent years, which promotes the construction of new data centers. The modular data center, which is often packaged in standard shipping formats, has been introduced to construct and deploy data centers more quickly and less expensively than conventional data centers. To conserve cooling costs in modular data centers, a hot aisle/cold aisle arrangement of the IT racks is employed by dividing the top of the racks using a partition. However, dividing the top of the racks increases the pressure loss of the server fans, leading to an increase in the server fan power. To examine the impact of this effect on annual cooling power, we perform simulations of power consumption in a modular data center that introduces outside air and louvers on the top of the racks. By varying the opening of the louver as a function of the outside air temperature and rack power, an operation to minimize the power consumption of the outside air fans and server fans is performed. We find that the total cooling power in the case where the louver is operated so as to minimize the total cooling power can be reduced, compared to the case where the louver is fully closed, by about 5% (10%) for 10 kW (20 kW) rack power. We discuss that the popular method for calculating energy efficiency in data centers, i.e. power usage effectiveness (PUE), defined by the ratio of the total amount of energy used to the energy used in IT equipment, is probably an incomplete metric. Therefore, we finally propose a revised PUE that removes the server fan power from the IT equipment energy, which should be used to estimate the energy efficiency and avoid wrong interpretation.
BibTeX:
@inproceedings{sato-seeking:2015,
  author = {Sato, M. and Matsunaga, A. and Chiba, M. and Shoujiguchi, A. and Yoshikawa, M.},
  title = {Seeking an energy-efficient modular data center: Impact of pressure loss on the server fan power},
  booktitle = {2015 International Conference on Electronics Packaging and iMAPS All Asia Conference (ICEP-IAAC)},
  year = {2015},
  pages = {617--622},
  doi = {10.1109/ICEP-IAAC.2015.7111088}
}
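The abstract above defines a revised PUE in which server fan power is removed from the IT equipment energy. The sketch below contrasts that revision with the conventional PUE; the function names and the example figures are illustrative assumptions, not values from the paper.
def classic_pue(total_facility_kwh, it_kwh):
    # Conventional PUE: total facility energy divided by IT equipment energy.
    return total_facility_kwh / it_kwh

def revised_pue(total_facility_kwh, it_kwh, server_fan_kwh):
    # Revised PUE as described in the abstract: server fan power is treated as
    # cooling overhead rather than IT load, so it is removed from the denominator.
    return total_facility_kwh / (it_kwh - server_fan_kwh)

# Made-up figures: if 5% of the nominal "IT" energy is actually spent on server fans,
# the revised metric reports a worse (higher) value than the classic PUE.
print(classic_pue(130.0, 100.0), revised_pue(130.0, 100.0, 5.0))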
Arghode V, Kang T, Joshi Y, Phelps W and Michaels M (2015), "Anemometric tool for air flow rate measurement through perforated tiles in a raised floor data center", In Thermal Measurement, Modeling Management Symposium (SEMI-THERM), 2015 31st., March, 2015. , pp. 163-171.
Abstract: In a raised floor data center, cold air from a pressurized sub-floor plenum reaches the data center room space through perforated floor tiles. Presently, the commercially available tool ''flow hood'' (also known as ''balometer'') is used to measure the air flow rate through the tiles. In the present paper, we will discuss the operating principle and the shortcomings of the commercial tool and will investigate a simple tile air flow rate measurement tool having an array of thermal anemometers (here termed the ''anemometric tool''). The performance of both tools is compared for different types of tiles (passive and active) over a wide range of tile air flow rates. It is found that the anemometric tool results in lower flow rate measurement uncertainty and works more effectively for high porosity tiles, as compared to the commercial flow hood tool.
BibTeX:
@inproceedings{arghode-anemometric:2015,
  author = {Arghode, V. K. and Kang, T. and Joshi, Y. and Phelps, W. and Michaels, M.},
  title = {Anemometric tool for air flow rate measurement through perforated tiles in a raised floor data center},
  booktitle = {Thermal Measurement, Modeling Management Symposium (SEMI-THERM), 2015 31st},
  year = {2015},
  pages = {163--171},
  doi = {10.1109/SEMI-THERM.2015.7100155}
}
Zhang Y, Peng Z, Jiang J, Li H and Fujita M (2015), "Temperature-aware software-based self-testing for delay faults", In Design, Automation Test in Europe Conference Exhibition (DATE), 2015., March, 2015. , pp. 423-428.
Abstract: Delay defects under high temperature have been one of the most critical factors affecting the reliability of computer systems, and current test methods do not address this problem properly. In this paper, a temperature-aware software-based self-testing (SBST) technique is proposed to self-heat the processors within a high temperature range and effectively test delay faults under high temperature. First, it automatically generates high-quality test programs through automatic test instruction generation (ATIG), and avoids over-testing caused by non-functional patterns. Second, it exploits two effective power-intensive program transformations to self-heat the processors internally. Third, it applies a greedy algorithm to search for the optimized schedule of the test templates in order to generate the test program while making sure that the temperature of the processor under test is within the specified range. Experimental results show that the generated program successfully guarantees delay testing within the given temperature range, and achieves high test performance with functional patterns.
BibTeX:
@inproceedings{zhang-temperature-aware:2015,
  author = {Zhang, Ying and Peng, Zebo and Jiang, Jianhui and Li, Huawei and Fujita, M.},
  title = {Temperature-aware software-based self-testing for delay faults},
  booktitle = {Design, Automation Test in Europe Conference Exhibition (DATE), 2015},
  year = {2015},
  pages = {423--428}
}
Congdon L (2015), "8 Advantages of Using Open Source in the Enterprise | The Enterprisers Project". February, 2015.
Abstract: I work with IT teams that are so passionate about Red Hat's open source mission that they bring a ''default to open source'' mentality to every project we work on. We've been quite successful in finding open source solutions for many of our business needs. Naturally, we turn to our own open source solutions for our operating system, middleware, and cloud needs. Beyond that, we always seek out open source solutions first for our other business needs, such as user authorization and telephony.
BibTeX:
@misc{congdon-8advantages:2015,
  author = {Congdon, Lee},
  title = {8 Advantages of Using Open Source in the Enterprise | The Enterprisers Project},
  journal = {Enterprisers Project},
  year = {2015},
  url = {https://enterprisersproject.com/article/2015/1/top-advantages-open-source-offers-over-proprietary-solutions}
}
Raguvaran K and Thiyagarajan J (2015), "Raspberry Pi based global industrial process monitoring through wireless communication", In 2015 International Conference on Robotics, Automation, Control and Embedded Systems (RACE)., February, 2015. , pp. 1-6.
Abstract: This paper proposes an advanced system for process management via a credit-card-sized single board computer called Raspberry Pi, based on multi-parameter monitoring hardware designed using RS232 and a microcontroller that measures and controls various global parameters. The system comprises a single master and multiple slaves with a wireless mode of communication, and a Raspberry Pi system that can operate on either the Windows or Linux operating system. The parameters that can be tracked are current, voltage, temperature, light intensity and water level. The hardware design is done with surface mount devices (SMD) on a double layer printed circuit board (PCB) to reduce the size and improve the power efficiency. The various interesting features are field device communication via USB-OTG enabled Android devices, in-field firmware update without any specific hardware, and remote monitoring and control.
BibTeX:
@inproceedings{raguvaran-raspberry:2015,
  author = {Raguvaran, K. and Thiyagarajan, J.},
  title = {Raspberry Pi based global industrial process monitoring through wireless communication},
  booktitle = {2015 International Conference on Robotics, Automation, Control and Embedded Systems (RACE)},
  year = {2015},
  pages = {1--6},
  doi = {10.1109/RACE.2015.7097298}
}
Wu C and Buyya R (2015), "Cloud Data Centers and Cost Modeling: A Complete Guide To Planning, Designing and Building a Cloud Data Center", February, 2015. Morgan Kaufmann.
Abstract: Cloud Data Centers and Cost Modeling establishes a framework for strategic decision-makers to facilitate the development of cloud data centers. Just as building a house requires a clear understanding of the blueprints, architecture, and costs of the project, building a cloud-based data center requires similar knowledge. The authors take a theoretical and practical approach, starting with the key questions to help uncover needs and clarify project scope. They then demonstrate probability tools to test and support decisions, and provide processes that resolve key issues. After laying a foundation of cloud concepts and definitions, the book addresses data center creation, infrastructure development, cost modeling, and simulations in decision-making, each part building on the previous. In this way the authors bridge technology, management, and infrastructure as a service, in one complete guide to data centers that facilitates educated decision making. The book explains how to balance cloud computing functionality with data center efficiency; covers key requirements for power management, cooling, server planning, virtualization, and storage management; describes advanced methods for modeling cloud computing cost, including Real Option Theory and Monte Carlo Simulations; and blends theoretical and practical discussions with insights for developers, consultants, and analysts considering data center development.
BibTeX:
@book{wu-cloud:2015,
  author = {Wu, Caesar and Buyya, Rajkumar},
  title = {Cloud Data Centers and Cost Modeling: A Complete Guide To Planning, Designing and Building a Cloud Data Center},
  publisher = {Morgan Kaufmann},
  year = {2015}
}
Comerford T (2015), "How Data Center Operators Can Avoid Energy Price Hikes This Winter". January, 2015.
Abstract: Energy consumption is one of the largest operating expenses for a data center, contributing to nearly 50 percent of total data center spend. Data center operators and owners can minimize the impact of unpredictable energy markets by better understanding the markets and establishing smart energy procurement strategies.
BibTeX:
@misc{comerford-how:2015,
  author = {Comerford, Tim},
  title = {How Data Center Operators Can Avoid Energy Price Hikes This Winter},
  journal = {Data Center Knowledge},
  year = {2015},
  url = {http://www.datacenterknowledge.com/archives/2015/01/29/data-center-operators-can-avoid-energy-price-hikes-winter/}
}
Jokonya O (2015), "Investigating Open Source Software Benefits in Public Sector", In 2015 48th HICSS., January, 2015. , pp. 2242-2251.
Abstract: This paper investigates the benefits of OSS in public sector organizations in order to understand the trends and patterns in different regions over time. Although open source software is used widely, in this study the authors examine the adoption of open source software in the public sector. As such, the paper uses content analysis to review published articles on open source software in the public sector or government organizations between 2003 and 2012 across the regions (Africa, America, Asia, and Europe). The results suggest that there is no one-size-fits-all set of open source software adoption benefits for the public sector in different regions. The results also show that technical benefits, vendor independence and customization are considered to be important for open source software adoption in public sector organizations. While this suggests that public sector organizations perceive open source software as one step towards vendor independence, customization is considered a more important benefit of open source software adoption in Asia than is the case in America.
BibTeX:
@inproceedings{jokonya-oss:2015,
  author = {Jokonya, O.},
  title = {Investigating Open Source Software Benefits In Public Sector},
  booktitle = {2015 48th HICSS},
  year = {2015},
  pages = {2242--2251},
  doi = {10.1109/HICSS.2015.268}
}
Marrone S and Nardone R (2015), "Automatic Resource Allocation for High Availability Cloud Services", Procedia Computer Science., January, 2015. Vol. 52, pp. 980-987.
Abstract: This paper proposes an approach to support cloud brokers finding optimal configurations in the deployment of dependability and security sensitive cloud applications. The approach is based on model-driven principles and uses both UML and Bayesian Networks to capture, analyse and optimise cloud deployment configurations. While the paper is most focused on the initial allocation phase, the approach is extensible to the operational phases of the life-cycle. In such a way, a continuous improvement of cloud applications may be realised by monitoring, enforcing and re-negotiating cloud resources following detected anomalies and failures.
BibTeX:
@article{marrone-automatic:2015,
  author = {Marrone, Stefano and Nardone, Roberto},
  title = {Automatic Resource Allocation for High Availability Cloud Services},
  journal = {Procedia Computer Science},
  year = {2015},
  volume = {52},
  pages = {980--987},
  url = {http://www.sciencedirect.com/science/article/pii/S187705091500976X},
  doi = {10.1016/j.procs.2015.05.176}
}
Abbas CJB, Orozco ALS and Villalba LJG (2015), "Monitoring of Data Centers Using Wireless Sensor Networks", In Handbook on Data Centers. , pp. 1171-1183. Springer New York.
Abstract: As data center energy densities, measured in power per square foot, increase, energy savings for cooling can be carried out by applying WSN technology and using the gathered information to efficiently manage the data center.
BibTeX:
@incollection{abbas-monitoring:2015,
  author = {Abbas, Cláudia Jacy Barenco and Orozco, Ana Lucila Sandoval and Villalba, Luis Javier García},
  editor = {Khan, Samee U. and Zomaya, Albert Y.},
  title = {Monitoring of Data Centers Using Wireless Sensor Networks},
  booktitle = {Handbook on Data Centers},
  publisher = {Springer New York},
  year = {2015},
  pages = {1171--1183},
  note = {DOI: 10.1007/978-1-4939-2092-1_40},
  url = {http://link.springer.com/chapter/10.1007/978-1-4939-2092-1_40}
}
Ahmad RW et al. (2015), "A Survey on Virtual Machine Migration and Server Consolidation Frameworks for Cloud Data Centers", Journal of Network and Computer Applications (JNCA). Vol. 52, pp. 11-25.
Abstract: Modern Cloud Data Centers exploit virtualization for efficient resource management to reduce cloud computational cost and energy budget. Virtualization empowered by virtual machine (VM) migration meets the ever increasing demands of dynamic workload by relocating VMs within Cloud Data Centers. VM migration helps successfully achieve various resource management objectives such as load balancing, power management, fault tolerance, and system maintenance. However, being resource-intensive, the VM migration process rigorously affects application performance unless attended by smart optimization methods. Furthermore, a Cloud Data Centre exploits server consolidation and DVFS methods to optimize energy consumption. This paper reviews state-of-the-art bandwidth optimization schemes, server consolidation frameworks, DVFS-enabled power optimization, and storage optimization methods over WAN links. Through a meticulous literature review of state-of-the-art live VM migration schemes, thematic taxonomies are proposed to categorize the reported literature. The critical aspects of virtual machine migration schemes are investigated through a comprehensive analysis of the existing schemes. The commonalties and differences among existing VM migration schemes are highlighted through a set of parameters derived from the literature. Finally, open research issues and trends in the VM migration domain that necessitate further consideration to develop optimal VM migration schemes are highlighted.
BibTeX:
@article{ahmad-survey:2015,
  author = {Ahmad, R. W. and others},
  title = {A Survey on Virtual Machine Migration and Server Consolidation Frameworks for Cloud Data Centers},
  journal = {Journal of Network and Computer Applications},
  year = {2015},
  volume = {52},
  pages = {11--25},
  url = {http://dx.doi.org/10.1016/j.jnca.2015.02.002},
  doi = {10.1016/j.jnca.2015.02.002}
}
Alkawsi GA, Mahmood AK and Baashar YM (2015), "Factors influencing the adoption of cloud computing in SME: A systematic review", In Mathematical Sciences and Computing Research (iSMSC), International Symposium on. , pp. 220-225. IEEE.
BibTeX:
@inproceedings{alkawsi-factors:2015,
  author = {Alkawsi, Gamal Abdulnaser and Mahmood, Ahmad Kamil and Baashar, Yahia Mohamed},
  title = {Factors influencing the adoption of cloud computing in SME: A systematic review},
  booktitle = {Mathematical Sciences and Computing Research (iSMSC), International Symposium on},
  publisher = {IEEE},
  year = {2015},
  pages = {220-225},
  url = {http://ieeexplore.ieee.org/abstract/document/7594056/}
}
Aslam S and Shah MA (2015), "Load Balancing Algorithms In Cloud Computing: A Survey Of Modern Techniques", In 2015 National Software Engineering Conference (NSEC). , pp. 30-35.
Abstract: Cloud computing has become popular due to its attractive features. The load on the cloud is increasing tremendously with the development of new applications. Load balancing is an important part of the cloud computing environment which ensures that all devices or processors perform the same amount of work in an equal amount of time. Different models and algorithms for load balancing in cloud computing have been developed with the aim of making cloud resources accessible to end users with ease and convenience. In this paper, we aim to provide a structured and comprehensive overview of the research on load balancing algorithms in cloud computing. This paper surveys the state-of-the-art load balancing tools and techniques over the period 2004-2015. We group existing approaches aimed at providing load balancing in a fair manner. With this categorization we provide an easy and concise view of the underlying model adopted by each approach.
BibTeX:
@inproceedings{aslam-load:2015,
  author = {Aslam, S. and Shah, M. A.},
  title = {Load Balancing Algorithms In Cloud Computing: A Survey Of Modern Techniques},
  booktitle = {2015 National Software Engineering Conference (NSEC)},
  year = {2015},
  pages = {30--35},
  doi = {10.1109/NSEC.2015.7396341}
}
Balter B (2015), "Open-source Alternatives To Proprietary Enterprise Software".
Abstract: Open-source-alternatives - a collaborative list of open-source alternatives to typical government and enterprise software needs. https://github.com/benbalter/open-source-alternatives
BibTeX:
@misc{balter-open-source:2015,
  author = {Balter, Benjamin},
  title = {Open-source Alternatives To Proprietary Enterprise Software},
  journal = {Open-source Alternatives To Proprietary Enterprise Software},
  year = {2015},
  url = {http://ben.balter.com/open-source-alternatives/}
}
Bijon K, Krishnan R and Sandhu R (2015), "Virtual Resource Orchestration Constraints in Cloud Infrastructure as a Service" , pp. 183-194. ACM Press.
BibTeX:
@inproceedings{bijon-virtual:2015,
  author = {Bijon, Khalid and Krishnan, Ram and Sandhu, Ravi},
  title = {Virtual Resource Orchestration Constraints in Cloud Infrastructure as a Service},
  publisher = {ACM Press},
  year = {2015},
  pages = {183--194},
  url = {http://dl.acm.org/citation.cfm?doid=2699026.2699112},
  doi = {10.1145/2699026.2699112}
}
Camargo DS and Miers CC (2015), "Automação Climática Em Sala De Servidores Utilizando Hardware Livre", SBC - Erad-RS. , pp. 177-180.
BibTeX:
@article{camargo-automacao-eradrs:2015,
  author = {Camargo, Daniel Scheidemantel and Miers, Charles Christian},
  title = {Automação Climática Em Sala De Servidores Utilizando Hardware Livre},
  journal = {SBC - ERAD-RS},
  year = {2015},
  pages = {177--180}
}
Camargo DS and Miers CC (2015), "Monitoramento Ambiental Open Source Para Data Center", SBC - Erad-RJ.
BibTeX:
@article{camargo-monitoramento-eradrj:2015,
  author = {Camargo, Daniel Scheidemantel and Miers, Charles Christian},
  title = {Monitoramento Ambiental Open Source Para Data Center},
  journal = {SBC - ERAD-RJ},
  year = {2015}
}
Camargo DS and Miers CC (2015), "Sensoriamento Climático Em Sala De Servidores Utilizando Soluções De Software E Hardware Livre", Anais Do Computer On The Beach. , pp. 389-391.
BibTeX:
@article{camargo-sensoriamento-cotb:2015,
  author = {Camargo, Daniel Scheidemantel and Miers, Charles Christian},
  title = {Sensoriamento Climático Em Sala De Servidores Utilizando Soluções De Software E Hardware Livre},
  journal = {Anais do Computer on the Beach},
  year = {2015},
  pages = {389--391}
}
Camargo DS and Miers CC (2015), "Sensoriamento Em Sala De Servidores Baseado Em Software E Hardware Livres", Sic-udesc.
BibTeX:
@article{camargo-sensoriamento-sic:2015,
  author = {Camargo, Daniel Scheidemantel and Miers, Charles Christian},
  title = {Sensoriamento Em Sala De Servidores Baseado Em Software E Hardware Livres},
  journal = {SIC-UDESC},
  year = {2015}
}
CENELEC-50600 (2015), "CENELEC/CEI EN 50600-2-3: Information Technology - Data Centre Facilities And Infrastructures Part 2-3: Environmental Control". Thesis at: European Committee for Electrotechnical Standardization (CENELEC) and Comitato Elettrotecnico Italiano (CEI). Italy (50600-2-3), pp. 30.
Abstract: This European Standard addresses environmental control within data centres based upon the criteria and classifications for “availability”, “security” and “energy efficiency enablement” within EN 50600-1. This European Standard specifies requirements and recommendations for the following: a) temperature control, b) fluid movement control, c) relative humidity control, d) particulate control, e) vibration, f) floor layout and equipment locations, g) energy saving practices, h) physical security of environmental control systems.
BibTeX:
@report{cenelec-50600-cenelec/cei:2015,
  author = {CENELEC-50600},
  title = {CENELEC/CEI EN 50600-2-3: Information Technology - Data Centre Facilities And Infrastructures Part 2-3: Environmental Control},
  school = {European Committee for Electrotechnical Standardization (CENELEC) and Comitato Elettrotecnico Italiano (CEI)},
  year = {2015},
  number = {50600-2-3},
  pages = {30},
  url = {https://global.ihs.com/doc_detail.cfm?&item_s_key=00674444&item_key_date=840031&input_doc_number=50600&input_doc_title=}
}
Chen S, Irving S and Peng L (2015), "Operational Cost Optimization For Cloud Computing Data Centers Using Renewable Energy", IEEE Systems Journal. Vol. PP(99), pp. 1-12.
Abstract: The electricity cost of cloud computing data centers, dominated by server power and cooling power, is growing rapidly. To tackle this problem, inlet air with moderate temperature and server consolidation are widely adopted. However, the benefit of these two methods is limited due to conventional air cooling system ineffectiveness caused by recirculation and low heat capacity. To address this problem, hybrid air and liquid cooling, as a practical and inexpensive approach, has been introduced. In this paper, we quantitatively analyze the impact of server consolidation and temperature of cooling water on the total electricity and server maintenance costs in hybrid cooling data centers. To minimize the total costs, we proposed to maintain sweet temperature and an available sleeping time threshold (ASTT) by which a joint cost optimization can be satisfied. By using real-world traces, the potential savings of sweet temperature and ASTT are estimated to be 23% of the total cost on average, while 96% of requests are satisfied compared to a strategy which only reduces electricity cost. The co-optimization is extended to increase the benefit of renewable energy, and its profit grows as more wind power is supplied.
BibTeX:
@article{chen-operational:2015,
  author = {Chen, S. and Irving, S. and Peng, L.},
  title = {Operational Cost Optimization For Cloud Computing Data Centers Using Renewable Energy},
  journal = {IEEE Systems Journal},
  year = {2015},
  volume = {PP},
  number = {99},
  pages = {1--12},
  doi = {10.1109/JSYST.2015.2462714}
}
Chowdhury MR, Mahmud MR and Rahman RM (2015), "Implementation And Performance Analysis Of Various VM Placement Strategies In CloudSim", Journal of Cloud Computing. Vol. 4(1), pp. 20.
Abstract: Infrastructure as a Service (IaaS) has become one of the most dominant features that cloud computing offers nowadays. IaaS enables a datacenter’s hardware to get virtualized, which allows cloud providers to create multiple virtual machine (VM) instances on a single physical machine, thus improving resource utilization and increasing the return on investment (ROI). VM consolidation includes issues like choosing an appropriate algorithm for selection of VMs for migration and placement of VMs to suitable hosts. VMs need to be migrated from an overutilized host to guarantee that demand for computer resources and performance requirements are accomplished. Besides, they need to be migrated from an underutilized host to deactivate that host for saving power consumption. In order to solve the problem of energy and performance, an efficient dynamic VM consolidation approach is introduced in the literature. In this work, we have proposed multiple redesigned VM placement algorithms and introduced a technique of clustering the VMs to migrate by taking into account both CPU utilization and allocated RAM. We implement and study the performance of our algorithms on a cloud computing simulation toolkit known as CloudSim using PlanetLab workload data. Simulation results demonstrate that our proposed techniques outperform the default VM placement algorithm designed in CloudSim.
BibTeX:
@article{chowdhury-implementation:2015,
  author = {Chowdhury, Mohammed Rashid and Mahmud, Mohammad Raihan and Rahman, Rashedur M.},
  title = {Implementation And Performance Analysis Of Various VM Placement Strategies In CloudSim},
  journal = {Journal of Cloud Computing},
  year = {2015},
  volume = {4},
  number = {1},
  pages = {20},
  doi = {10.1186/s13677-015-0045-5}
}
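For readers comparing such placement policies, a minimal sketch of a best-fit style placement that weighs both CPU and RAM (the two dimensions the authors cluster on) is shown below in Python. The class names, fields and scoring rule are illustrative assumptions, not the algorithms implemented in the paper or shipped with CloudSim.

# Illustrative sketch only: a best-fit placement that considers CPU and RAM.
from dataclasses import dataclass

@dataclass
class Host:
    cpu_free: float  # e.g. MIPS still available on the host
    ram_free: float  # e.g. MB of RAM still available on the host

@dataclass
class Vm:
    cpu: float
    ram: float

def place_vm(vm, hosts):
    """Pick the feasible host that leaves the least combined CPU+RAM slack."""
    best, best_slack = None, float("inf")
    for host in hosts:
        if host.cpu_free >= vm.cpu and host.ram_free >= vm.ram:
            slack = (host.cpu_free - vm.cpu) + (host.ram_free - vm.ram)
            if slack < best_slack:
                best, best_slack = host, slack
    if best is not None:
        best.cpu_free -= vm.cpu
        best.ram_free -= vm.ram
    return best  # None means no host could accommodate the VM

Placing a stream of VMs then reduces to ordering them (for example by CPU demand, descending) and calling place_vm for each one.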
Cupertino L, Da Costa G, Oleksiak A, Piątek W, Pierson J-M, Salom J, Sisó L, Stolf P, Sun H and Zilio T (2015), "Energy-efficient, Thermal-aware Modeling And Simulation Of Data Centers: The CoolEmAll Approach And Evaluation Results", Ad Hoc Networks. Vol. 25, Part B, pp. 535-553.
Abstract: This paper describes the CoolEmAll project and its approach for modeling and simulating energy-efficient and thermal-aware data centers. The aim of the project was to address energy-thermal efficiency of data centers by combining the optimization of IT, cooling and workload management. This paper provides a complete data center model considering the workload profiles, the applications profiling, the power model and a cooling model. Different energy efficiency metrics are proposed and various resource management and scheduling policies are presented. The proposed strategies are validated through simulation at different levels of a data center.
BibTeX:
@article{cupertino-energy-efficient:2015,
  author = {Cupertino, Leandro and Da Costa, Georges and Oleksiak, Ariel and Piątek, Wojciech and Pierson, Jean-Marc and Salom, Jaume and Sisó, Laura and Stolf, Patricia and Sun, Hongyang and Zilio, Thomas},
  title = {Energy-efficient, Thermal-aware Modeling And Simulation Of Data Centers: The CoolEmAll Approach And Evaluation Results},
  journal = {Ad Hoc Networks},
  year = {2015},
  volume = {25, Part B},
  pages = {535--553},
  url = {http://www.sciencedirect.com/science/article/pii/S1570870514002364},
  doi = {10.1016/j.adhoc.2014.11.002}
}
Denton J (2015), "Learning OpenStack Networking (Neutron)" Packt Publishing Ltd.
Abstract: Wield the power of OpenStack Neutron networking to bring network infrastructure and capabilities to your cloud. About this book: this completely up-to-date edition will show you how to deploy a cloud on OpenStack using community-driven processes. It includes rich examples that will help you understand complex networking topics with ease; it covers every aspect of designing, creating, customizing, and maintaining the core network foundation of an OpenStack cloud using OpenStack Neutron, all in one book; and it is written by best-selling author James Denton, who has more than 15 years of experience in system administration and networking. James has experience of deploying, operating, and maintaining OpenStack clouds and has worked with top enterprises and organizations. Who this book is for: if you are an OpenStack-based cloud operator and administrator who is new to Neutron networking and wants to build your very own OpenStack cloud, then this book is for you. Prior networking experience and a physical server and network infrastructure are recommended to follow along with the concepts demonstrated in the book. What you will learn: architect and install the latest release of OpenStack on Ubuntu Linux 14.04 LTS; review the components of OpenStack networking, including plugins, agents, and services, and learn how they work together to coordinate network operations; build a virtual switching infrastructure using reference architectures based on ML2 + Open vSwitch or ML2 + LinuxBridge; create networks, subnets, and routers that connect virtual machine instances to the network; deploy highly available routers using DVR or VRRP-based methods; scale your application with HAProxy and Load Balancing as-a-Service; implement port and router-level security using security groups and Firewall as-a-Service; provide connectivity to tenant networks with Virtual Private Networking as-a-Service (VPNaaS); find out how to manage OpenStack networking resources using CLI and GUI-driven methods. In detail: OpenStack Neutron is an OpenStack component that provides Networking as a Service for other OpenStack services to architect networks and create virtual machines through its API. This API lets you define network connectivity in order to leverage network capabilities in cloud deployments. Through this practical book, you will build a strong foundational knowledge of Neutron, and will architect and build an OpenStack cloud using advanced networking features. We start with an introduction to OpenStack Neutron and its various components, including virtual switching, routing, FWaaS, VPNaaS, and LBaaS. You'll also get hands-on by installing OpenStack and Neutron and its components, and use agents and plugins to orchestrate network connectivity and build a virtual switching infrastructure. Moving on, you'll get to grips with the HA routing capabilities utilizing VRRP and distributed virtual routers in Neutron. You'll also discover load balancing fundamentals, including the difference between nodes, pools, pool members, and virtual IPs. You'll discover the purpose of security groups and learn how to apply the security concept to your cloud/tenant/instance. Finally, you'll configure virtual private networks that will allow you to avoid the use of SNAT and floating IPs when connecting to remote networks. Style and approach: this easy-to-follow guide on networking in OpenStack follows a step-by-step process for installing OpenStack and configuring the base networking components. Each major networking component has a dedicated chapter that will build on your experience gained from prior chapters.
BibTeX:
@book{denton-learning:2015,
  author = {Denton, James},
  title = {Learning OpenStack Networking (Neutron)},
  publisher = {Packt Publishing Ltd},
  year = {2015}
}
Ding Y, Qin X, Liu L and Wang T (2015), "Energy efficient scheduling of virtual machines in cloud with deadline constraint", Future Generation Computer Systems. Vol. 50, pp. 62-74.
BibTeX:
@article{ding-energy:2015,
  author = {Ding, Youwei and Qin, Xiaolin and Liu, Liang and Wang, Taochun},
  title = {Energy efficient scheduling of virtual machines in cloud with deadline constraint},
  journal = {Future Generation Computer Systems},
  year = {2015},
  volume = {50},
  pages = {62--74},
  url = {http://linkinghub.elsevier.com/retrieve/pii/S0167739X15000369},
  doi = {10.1016/j.future.2015.02.001}
}
Embratel-teleco (2015), "A Conectividade Das Empresas Brasileiras".
Abstract: Companies have always relied on technology to increase their productivity. In recent years, however, this process has accelerated sharply, mainly due to advances in information technology, resulting in companies that are increasingly digital. In this context, connectivity becomes a basic element for increasing the productivity of companies of any size and in any activity. To track this process, Embratel, in partnership with Teleco, selected a set of indicators to assess how connected Brazilian companies are, which gave rise to the survey ``A Conectividade das Empresas Brasileiras''.
BibTeX:
@misc{embratel-teleco-conectividade:2015,
  author = {Embratel-teleco},
  title = {A Conectividade Das Empresas Brasileiras},
  journal = {A Conectividade Das Empresas Brasileiras},
  year = {2015}
}
Eun KL (2015), "Proactive Thermal-aware Management In Cloud Datacenters". Thesis at: Rutgers University-graduate School-new Brunswick.
BibTeX:
@thesis{eun-proactive:2015,
  author = {Eun, Kyung Lee},
  title = {Proactive Thermal-aware Management In Cloud Datacenters},
  school = {Rutgers University-graduate School-new Brunswick},
  year = {2015},
  url = {https://rucore.libraries.rutgers.edu/rutgers-lib/46378/}
}
Forsman M, Glad A, Lundberg L and Ilie D (2015), "Algorithms for automated live migration of virtual machines", Journal of Systems and Software. Vol. 101, pp. 110-126.
BibTeX:
@article{forsman-algorithms:2015,
  author = {Forsman, Mattias and Glad, Andreas and Lundberg, Lars and Ilie, Dragos},
  title = {Algorithms for automated live migration of virtual machines},
  journal = {Journal of Systems and Software},
  year = {2015},
  volume = {101},
  pages = {110--126},
  url = {http://linkinghub.elsevier.com/retrieve/pii/S0164121214002751},
  doi = {10.1016/j.jss.2014.11.044}
}
Guzek M, Kliazovich D and Bouvry P (2015), "HEROS: Energy-efficient Load Balancing For Heterogeneous Data Centers", In 8th IEEE-CLOUD 2015.
BibTeX:
@inproceedings{guzek-heros:2015,
  author = {Guzek, Mateusz and Kliazovich, Dzmitry and Bouvry, Pascal},
  title = {HEROS: Energy-efficient Load Balancing For Heterogeneous Data Centers},
  booktitle = {8th IEEE-CLOUD 2015},
  year = {2015},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7214113}
}
Hieu NT, Di Francesco M and Ylä-Jääski A (2015), "Virtual Machine Consolidation with Usage Prediction for Energy-Efficient Cloud Data Centers", pp. 750-757.
Abstract: Virtual machine consolidation aims at reducing the number of active physical servers in a data center, with the goal to reduce the total power consumption. In this context, most of the existing solutions rely on aggressive virtual machine migration, thus resulting in unnecessary overhead and energy wastage. This article presents a virtual machine consolidation algorithm with usage prediction (VMCUP) for improving the energy efficiency of cloud data centers. Our algorithm is executed during the virtual machine consolidation process to estimate the short-term future CPU utilization based on the local history of the considered servers. The joint use of current and predicted CPU utilization metrics allows a reliable characterization of overloaded and underloaded servers, thereby reducing both the load and the power consumption after consolidation. We evaluate our proposed solution through simulations on real workloads from the PlanetLab and the Google Cluster Data datasets. In comparison with the state of the art, the obtained results show that consolidation with usage prediction reduces the total migrations and the power consumption of the servers while complying with the service level agreement. © 2015 IEEE.
BibTeX:
@inproceedings{hieu-virtual:2015,
  author = {Hieu, N. T. and Di Francesco, M. and Ylä-Jääski, A.},
  title = {Virtual Machine Consolidation with Usage Prediction for Energy-Efficient Cloud Data Centers},
  year = {2015},
  pages = {750--757},
  doi = {10.1109/CLOUD.2015.104}
}
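To give a concrete feel for the "usage prediction" ingredient described above, the sketch below estimates a host's short-term CPU utilization from its recent local history with a least-squares linear trend and combines it with the current reading to flag overloads. The window size, threshold and function names are assumptions for illustration, not the VMCUP algorithm itself.

# Illustrative sketch only: short-term CPU prediction from local history.
def predict_next_utilization(history, window=10):
    """Least-squares linear extrapolation over the last `window` samples."""
    recent = history[-window:]
    n = len(recent)
    if n < 2:
        return recent[-1] if recent else 0.0
    xs = range(n)
    mean_x = sum(xs) / n
    mean_y = sum(recent) / n
    denom = sum((x - mean_x) ** 2 for x in xs)
    slope = sum((x - mean_x) * (y - mean_y) for x, y in zip(xs, recent)) / denom
    intercept = mean_y - slope * mean_x
    # Extrapolate one step ahead and clamp to a valid utilization in [0, 1].
    return max(0.0, min(1.0, intercept + slope * n))

def is_overloaded(history, threshold=0.8):
    """Treat a host as overloaded only if current AND predicted usage exceed the threshold."""
    return history[-1] > threshold and predict_next_utilization(history) > threshold

Requiring both the current and the predicted value to exceed the threshold is what avoids migrating VMs away from hosts whose load spike is about to subside.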
ISO/IEC-24764 (2015), "ISO/IEC 24764:2010 - Information Technology -- Generic Cabling Systems For Data Centres". Thesis at: ANSI/BICSI. USA (ISO/IEC JTC 1/SC 25), pp. 37.
Abstract: ISO/IEC 24764:2010(E) specifies generic cabling that supports a wide range of communications services for use within a data centre. It covers balanced cabling and optical fibre cabling. It is based upon and references the requirements of ISO/IEC 11801 and contains additional requirements that are appropriate to data centres in which the maximum distance over which communications services have to be distributed is 2 000 m.
BibTeX:
@report{iso/iec-24764-iso/iec:2015,
  author = {ISO/IEC-24764},
  title = {ISO/IEC 24764:2010 - Information Technology -- Generic Cabling Systems For Data Centres},
  school = {ANSI/BICSI},
  year = {2015},
  number = {ISO/IEC JTC 1/SC 25},
  pages = {37},
  url = {http://www.iso.org/iso/home/store/catalogue_tc/catalogue_detail.htm?csnumber=43520}
}
Koomey J and Taylor J (2015), "New data supports finding that 30 percent of servers are ‘Comatose’, indicating that nearly a third of capital in enterprise data centers is wasted", Anthesis Group, 3 June 2015.
BibTeX:
@article{koomey-new:2015,
  author = {Koomey, Jonathan and Taylor, Jon},
  title = {New data supports finding that 30 percent of servers are ‘Comatose’, indicating that nearly a third of capital in enterprise data centers is wasted},
  year = {2015},
  note = {Anthesis Group, 3 June 2015},
  url = {http://tsologic.com/wp-content/uploads/2015/06/AnthesisGroup-Case-Study-30PercentComatose-06032015.pdf}
}
Lim J, Lim H and Kang S (2015), "3D Stacked DRAM Refresh Management With Guaranteed Data Reliability", IEEE Transactions On Computer-Aided Design Of Integrated Circuits And Systems. Vol. PP(99), pp. 1-1.
Abstract: The three-dimensional (3D) integrated dynamic random-access memory (DRAM) structure with a processor is being widely studied due to advantages such as a large bandwidth and data communication power reduction. In these structures, the massive heat generation of the processor results in a high operating temperature and a high refresh rate of the DRAM. Thus, in the 3D DRAM over processor architecture, temperature-aware refresh management is necessary. However, temperature determination is difficult, because in the 3D DRAM the temperature changes dynamically and temperature variation in a DRAM die is complicated. In this paper, a thermal guard-band set-up method for the 3D stacked DRAM is proposed. It considers the latency of the temperature data and the position difference between the temperature sensor and the DRAM cell. With this method, the data reliability of the on-chip temperature sensor dependent adaptive refresh control is guaranteed. In addition, an efficient temperature sensor built-in and refresh control method is analyzed. The expected refresh power reduction is examined through a simulation.
BibTeX:
@article{lim-3d:2015,
  author = {Lim, J. and Lim, H. and Kang, S.},
  title = {3D Stacked DRAM Refresh Management With Guaranteed Data Reliability},
  journal = {IEEE Transactions On Computer-Aided Design Of Integrated Circuits And Systems},
  year = {2015},
  volume = {PP},
  number = {99},
  pages = {1--1},
  doi = {10.1109/TCAD.2015.2413411}
}
Malik A, Bilal K, Malik S, Anwar Z, Aziz K, Kliazovich D, Ghani N, Khan S and Buyya R (2015), "CloudNetSim++: A GUI Based Framework For Modeling And Simulation Of Data Centers In OMNeT++", IEEE Transactions On Services Computing. (1), pp. 1-1.
Abstract: State-of-the-art cloud simulators in use today are limited in the number of features they provide, lack real network communication models, and do not provide an extensive graphical user interface (GUI) to support developers and researchers in extending the behavior of the cloud environment. We propose CloudNetSim++, a comprehensive packet-level simulator that enables simulation of cloud environments. CloudNetSim++ can be used to evaluate a wide spectrum of cloud components, such as processing elements, storage, networking, service level agreements (SLA), scheduling algorithms, fine-grained energy consumption, and VM consolidation algorithms. CloudNetSim++ offers extendibility, which means that developers and researchers can easily incorporate their own algorithms for scheduling, workload consolidation, VM migration, and SLA agreement. The simulation environment of CloudNetSim++ offers a rich GUI that provides a high-level view of distributed data centers connected with various network topologies. The package also includes an energy computation module that provides a fine-grained analysis of energy consumed by each component. This paper shows the flexibility and effectiveness of CloudNetSim++ through experimental results demonstrated using real-world data center workloads. Moreover, to demonstrate the correctness of CloudNetSim++, we performed formal modeling, analysis, and verification using high-level Petri nets, satisfiability modulo theories (SMT), and the Z3 solver.
BibTeX:
@article{malik-cloudnetsim:2015,
  author = {Malik, Asad and Bilal, Kashif and Malik, Saif and Anwar, Zahid and Aziz, Khurram and Kliazovich, Dzmitry and Ghani, Nasir and Khan, Samee and Buyya, Rajkumar},
  title = {CloudNetSim++: A GUI Based Framework For Modeling And Simulation Of Data Centers In OMNeT++},
  journal = {IEEE Transactions On Services Computing},
  year = {2015},
  number = {1},
  pages = {1--1},
  doi = {10.1109/TSC.2015.2496164}
}
Monil MAH and Rahman RM (2015), "Fuzzy Logic Based Energy Aware VM Consolidation", In Internet And Distributed Computing Systems. (9258), pp. 31-38. Springer International Publishing.
Abstract: The global need for computing is growing day by day and as a result cloud-based services are becoming more prominent for their pay-as-you-go modality. However, cloud-based datacenters consume a considerable amount of energy, which draws negative attention. To sustain the growth of cloud computing, energy consumption is now a major concern for cloud-based datacenters. To overcome this problem, cloud computing algorithms should be efficient enough to keep energy consumption low and at the same time provide the desired QoS. Virtual machine consolidation is one such technique to ensure the energy-QoS balance. In this research, we explored a fuzzy logic and heuristic based virtual machine consolidation approach to achieve the energy-QoS balance. A fuzzy VM selection method has been proposed to select a VM from an overloaded host. Additionally, we incorporated migration control in the fuzzy VM selection method. We have used the CloudSim toolkit to simulate our experiment and evaluate the performance of the proposed algorithm on real-world workload traces of PlanetLab VMs. Simulation results demonstrate that the proposed method provides the best performance in all performance metrics while consuming the least energy.
BibTeX:
@incollection{monil-fuzzy:2015,
  author = {Monil, Mohammad Alaul Haque and Rahman, Rashedur M.},
  editor = {Fatta, Giuseppe Di and Fortino, Giancarlo and Li, Wenfeng and Pathan, Mukaddim and Stahl, Frederic and Guerrieri, Antonio},
  title = {Fuzzy Logic Based Energy Aware VM Consolidation},
  booktitle = {Internet and Distributed Computing Systems},
  publisher = {Springer International Publishing},
  year = {2015},
  number = {9258},
  pages = {31--38},
  doi = {10.1007/978-3-319-23237-9_4}
}
Morabito R (2015), "Power Consumption Of Virtualization Technologies: An Empirical Investigation", IEEE/ACM UCC 2015 SD3C. IEEE.
BibTeX:
@article{morabito-power:2015,
  author = {Morabito, Roberto},
  title = {Power Consumption Of Virtualization Technologies: An Empirical Investigation},
  journal = {IEEE/ACM UCC 2015 SD3C},
  publisher = {IEEE},
  year = {2015},
  url = {http://arxiv.org/abs/1511.01232}
}
Nguyen KK and Cheriet M (2015), "Environment-Aware Virtual Slice Provisioning in Green Cloud Environment", IEEE Transactions on Services Computing. Vol. 8(3), pp. 507-519.
Abstract: Environmental footprint resulting from datacenters activities can be reduced by both energy efficiency and renewable energy in a complementary fashion thanks to cloud computing paradigms. In a cloud hosting multi-tenant applications, virtual service providers can be provided with real-time recommendation techniques to allocate their virtual resources in edge, core, or access layers in an optimal way to minimize costs and footprint. Such a dynamic technique requires a flexible and optimized networking scheme to enable elastic virtual tenants spanning multiple physical nodes. In this paper, we investigate an environment-aware paradigm for virtual slices that allows improving energy efficiency and dealing with intermittent renewable power sources. A virtual slice consists of optimal flows assigned to virtual machines (VMs) in a virtual data center taking into account traffic requirements, VM locations, physical network capacity, and renewable energy availability. Considering various cloud consolidation schemes, we formulate and then propose an optimal solution for virtual slice assignment problem. Simulations on the GSN showed that the proposed model achieves better performance than the existing methods with respect to network footprint reductions.
BibTeX:
@article{nguyen-environment-aware:2015,
  author = {Nguyen, K. K. and Cheriet, M.},
  title = {Environment-Aware Virtual Slice Provisioning in Green Cloud Environment},
  journal = {IEEE Transactions on Services Computing},
  year = {2015},
  volume = {8},
  number = {3},
  pages = {507--519},
  doi = {10.1109/TSC.2014.2362544}
}
Oechsner S and Ripke A (2015), "Flexible support of VNF placement functions in OpenStack", In Network Softwarization (NetSoft), 2015 1st IEEE Conference on. , pp. 1-6. IEEE.
BibTeX:
@inproceedings{oechsner-flexible:2015,
  author = {Oechsner, Simon and Ripke, Andreas},
  title = {Flexible support of VNF placement functions in OpenStack},
  booktitle = {Network Softwarization (NetSoft), 2015 1st IEEE Conference on},
  publisher = {IEEE},
  year = {2015},
  pages = {1--6},
  url = {http://ieeexplore.ieee.org/abstract/document/7116178/}
}
Piatek W, Oleksiak A and Vor Dem Berge M (2015), "Modeling Impact Of Power- And Thermal-aware Fans Management On Data Center Energy Consumption", In Proceedings Of The 2015 ACM Sixth International Conference On Future Energy Systems. New York, NY, USA, pp. 253-258. ACM.
Abstract: In this paper we study the power usage and thermal management of micro servers to analyze their impact on the overall data center energy consumption. We propose thermal models of micro servers based on an analytical approach tuned with parameters derived from empirical tests. We demonstrate how the fan management configuration affects the energy consumption of servers and the whole data center. We also apply the proposed model to predict temperature changes a short time ahead and take advantage of these predictions to improve fan management. We show why PUE is not sufficient or can even be misleading in minimizing data center energy consumption. To mitigate this issue, we propose metrics that can be used to correctly reflect the impact of fan management on the overall energy consumption.
BibTeX:
@inproceedings{piatek-modeling:2015,
  author = {Piatek, Wojciech and Oleksiak, Ariel and Vor Dem Berge, Micha},
  title = {Modeling Impact Of Power- And Thermal-aware Fans Management On Data Center Energy Consumption},
  booktitle = {Proceedings Of The 2015 ACM Sixth International Conference On Future Energy Systems},
  publisher = {ACM},
  year = {2015},
  pages = {253--258},
  doi = {10.1145/2768510.2768525}
}
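Because the entry above (and several others in this list) argues about the limits of PUE, it is worth recalling the standard Green Grid definition of the metric; the formula below is general background rather than a result from the paper:

\mathrm{PUE} = \frac{P_{\text{total facility}}}{P_{\text{IT equipment}}} \ge 1

A PUE close to 1 only says that little power is spent outside the IT load (cooling, power distribution); since server-internal fans are normally counted as IT load, fan-level optimizations of the kind studied above may not be visible in PUE at all, which is one reading of the authors' caution.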
Rizzon B, Clivillé V, Galichet S, Ochalek P and Ratajczak E (2015), "Decision Problem Of Instrumentation In A Company Involved In ISO 50001", In Industrial Engineering And Systems Management (IESM), 2015. , pp. 409-416. IEEE.
BibTeX:
@inproceedings{rizzon-decision:2015,
  author = {Rizzon, Bastien and Clivillé, Vincent and Galichet, Sylvie and Ochalek, Pascal and Ratajczak, Elodie},
  title = {Decision Problem Of Instrumentation In A Company Involved In ISO 50001},
  booktitle = {Industrial Engineering And Systems Management (IESM), 2015},
  publisher = {IEEE},
  year = {2015},
  pages = {409--416},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7380190}
}
Salam A, Gilani Z and Haq SU (2015), "Deploying And Managing A Cloud Infrastructure: Real-world Skills For The CompTIA Cloud+ Certification And Beyond: Exam CV0-001" John Wiley & Sons.
Abstract: Learn in-demand cloud computing skills from industry experts. Deploying and Managing a Cloud Infrastructure is an excellent resource for IT professionals seeking to tap into the demand for cloud administrators. This book helps prepare candidates for the CompTIA Cloud+ (CV0-001) cloud computing certification exam. Designed for IT professionals with 2-3 years of networking experience, this certification provides validation of your cloud infrastructure knowledge. With over 30 years of combined experience in cloud computing, the author team provides the latest expert perspectives on enterprise-level mobile computing and covers the most essential topics for building and maintaining cloud-based systems, including: understanding basic cloud-related computing concepts, terminology, and characteristics; identifying cloud delivery solutions and deploying new infrastructure; managing cloud technologies, services, and networks; and monitoring hardware and software performance. Featuring real-world examples and interactive exercises, Deploying and Managing Cloud Infrastructure delivers practical knowledge you can apply immediately. In addition, you also get access to a full set of electronic study tools including an interactive test environment, electronic flashcards, and a glossary of key terms. Now is the time to learn the cloud computing skills you need to take the next step in your IT career.
BibTeX:
@book{salam-deploying:2015,
  author = {Salam, Abdul and Gilani, Zafar and Haq, Salman Ul},
  title = {Deploying And Managing A Cloud Infrastructure: Real-world Skills For The CompTIA Cloud+ Certification And Beyond: Exam CV0-001},
  publisher = {John Wiley & Sons},
  year = {2015}
}
Stallings W (2015), "Foundations Of Modern Networking: SDN, NFV, QoE, IoT, And Cloud" Addison-Wesley Professional.
Abstract: Foundations of Modern Networking is a comprehensive, unified survey of modern networking technology and applications for today’s professionals, managers, and students. Dr. William Stallings offers clear and well-organized coverage of five key technologies that are transforming networks: software-defined networks (SDN), network functions virtualization (NFV), quality of experience (QoE), the Internet of Things (IoT), and cloud-based services. Dr. Stallings reviews current network ecosystems and the challenges they face, from big data and mobility to security and complexity. Next, he offers complete, self-contained coverage of each new set of technologies: how they work, how they are architected, and how they can be applied to solve real problems. Dr. Stallings presents a chapter-length analysis of emerging security issues in modern networks. He concludes with an up-to-date discussion of networking careers, including important recent changes in roles and skill requirements. Coverage: elements of the modern networking ecosystem (technologies, architecture, services, and applications); evolving requirements of current network environments; SDN concepts, rationale, applications, and standards across data, control, and application planes; OpenFlow, OpenDaylight, and other key SDN technologies; network functions virtualization concepts, technology, applications, and software defined infrastructure; ensuring customer quality of experience (QoE) with interactive video and multimedia network traffic; cloud networking services, deployment models, architecture, and linkages to SDN and NFV; IoT and fog computing in depth, with key components of IoT-enabled devices, model architectures, and example implementations; securing SDN, NFV, cloud, and IoT environments; career preparation and ongoing education for tomorrow’s networking careers. Key features: strong coverage of unifying principles and practical techniques; more than a hundred figures that clarify key concepts; web support at williamstallings.com/network/; QR codes throughout, linking to the website and other resources; keyword/acronym lists, recommended readings, and glossary; margin note definitions of key words throughout the text.
BibTeX:
@book{stallings-foundations:2015,
  author = {Stallings, William},
  title = {Foundations Of Modern Networking: SDN, NFV, QoE, IoT, And Cloud},
  publisher = {Addison-Wesley Professional},
  year = {2015}
}
Wang Y and Li X (2015), "Achieve high availability about point-single failures in OpenStack", In 2015 4th International Conference on Computer Science and Network Technology (ICCSNT). Vol. 01, pp. 45-48.
Abstract: This paper describes a method to deal with the complexity of the distributed environment in cloud technology and the single-point-failure problem. In this paper, we take virtual machine failure and host failure in OpenStack into consideration. Fast restoration of the service is achieved by an existing OpenStack component called Ceilometer and a new component named Senlin. The function is achieved by adding patches and modifying the source code in OpenStack. Ceilometer plays the role of collecting information about virtual machine failure and host failure in OpenStack and transfers this information to Senlin. Then Senlin can make a decision to restore the failure. This method achieves high availability in OpenStack. We implement the proposed method and show the effectiveness of fast restoration.
BibTeX:
@inproceedings{wang-achieve:2015,
  author = {Wang, Yiping and Li, Xiaoyong},
  title = {Achieve high availability about point-single failures in OpenStack},
  booktitle = {2015 4th International Conference on Computer Science and Network Technology (ICCSNT)},
  year = {2015},
  volume = {01},
  pages = {45--48},
  doi = {10.1109/ICCSNT.2015.7490705}
}
Wang JV, Cheng C-T and Tse CK (2015), "A Power And Thermal-aware Virtual Machine Allocation Mechanism For Cloud Data Centers", In 2015 IEEE International Conference On Communication Workshop (ICCW). , pp. 2850-2855. IEEE.
BibTeX:
@inproceedings{wang-power:2015,
  author = {Wang, Jing V. and Cheng, Chi-Tsun and Tse, Chi K.},
  title = {A Power And Thermal-aware Virtual Machine Allocation Mechanism For Cloud Data Centers},
  booktitle = {2015 IEEE International Conference On Communication Workshop (ICCW)},
  publisher = {IEEE},
  year = {2015},
  pages = {2850--2855},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7247611}
}
Critchley T (2014), "High Availability IT Services", December, 2014. CRC Press.
Abstract: This book starts with the basic premise that a service is comprised of the 3Ps: products, processes, and people. Moreover, these entities and their sub-entities interlink to support the services that end users require to run and support a business. This widens the scope of any availability design far beyond hardware and software. It also increases the potential for service failure for reasons beyond just hardware and software; the concept of logical outages. High Availability IT Services details the considerations for designing and running highly available "services" and not just the systems infrastructure that supports those services. Providing an overview of virtualization and cloud computing, it supplies a detailed look at availability, redundancy, fault tolerance, and security. It also stresses the importance of human factors. The book starts off by providing an availability primer and detailing the reasons why you need to be concerned with high availability. Next, it outlines the theory of reliability and availability and the elements of actual practices in this high availability (HA) area, including Service Level Agreements (SLAs) and Change Management. Examining what the major hardware and software vendors have to offer in the HA world, the book considers the ubiquitous world of clouds and virtualization as well as the availability considerations they present. The book examines high availability concepts and architectures such as reliability, availability, and serviceability (RAS); clusters; grids; and redundant arrays of independent disks (RAID) storage. It also covers the role of security in providing high availability, cluster offerings, emergent Linux clusters, online transaction processing (OLTP), and relational databases.
BibTeX:
@book{critchley-high:2014,
  author = {Critchley, Terry},
  title = {High Availability IT Services},
  publisher = {CRC Press},
  year = {2014}
}
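Since several entries in this list quantify availability, it may help to restate the textbook steady-state availability of a repairable component; this is standard background rather than something specific to this book:

A = \frac{\mathrm{MTBF}}{\mathrm{MTBF} + \mathrm{MTTR}}

For example, MTBF = 10{,}000 h and MTTR = 1 h give A \approx 0.9999, i.e. roughly "four nines" of availability.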
Geng H (2014), "Data Center Handbook", December, 2014. John Wiley & Sons.
Abstract: Provides the fundamentals, technologies, and best practices in designing, constructing and managing mission critical, energy efficient data centers. Organizations in need of high-speed connectivity and nonstop systems operations depend upon data centers for a range of deployment solutions. A data center is a facility used to house computer systems and associated components, such as telecommunications and storage systems. It generally includes multiple power sources, redundant data communications connections, environmental controls (e.g., air conditioning, fire suppression) and security devices. With contributions from an international list of experts, the Data Center Handbook instructs readers to: prepare a strategic plan that includes location planning, site selection, roadmap and capacity planning; design and build “green” data centers, with mission critical and energy-efficient infrastructure; apply best practices to reduce energy consumption and carbon emissions; apply IT technologies such as cloud and virtualization; manage data centers in order to sustain operations with minimum costs; and prepare and practice disaster recovery and business continuity plans. The book imparts essential knowledge needed to implement data center design and construction, apply IT technologies, and continually improve data center operations.
BibTeX:
@book{geng-data:2014,
  author = {Geng, Hwaiyu},
  title = {Data Center Handbook},
  publisher = {John Wiley & Sons},
  year = {2014}
}
Rossigneux F, Lefevre L, Gelas JP and Assuncao MDD (2014), "A Generic And Extensible Framework For Monitoring Energy Consumption Of OpenStack Clouds", In 2014 IEEE 4th BDCloud., December, 2014. , pp. 696-702.
Abstract: Although cloud computing has been transformational to the IT industry, it is built on large data centres that often consume massive amounts of electrical power. Efforts have been made to reduce the energy clouds consume, with certain data centres now approaching a Power Usage Effectiveness (PUE) factor of 1.08. While this is an incredible mark, it also means that the IT infrastructure accounts for a large part of the power consumed by a data centre. Hence, means to monitor and analyse how energy is spent have never been so crucial. Such monitoring is required not only for understanding how power is consumed, but also for assessing the impact of energy management policies. In this article, we draw lessons from experience on monitoring large-scale systems and introduce an energy monitoring software framework called Kilo Watt API (Kwapi), able to handle OpenStack clouds. The framework - whose architecture is scalable, extensible, and completely integrated into OpenStack - supports several wattmeter devices, multiple measurement formats, and minimises communication overhead.
BibTeX:
@inproceedings{rossigneux-generic:2014,
  author = {Rossigneux, F. and Lefevre, L. and Gelas, J. P. and Assuncao, M. D. D.},
  title = {A Generic And Extensible Framework For Monitoring Energy Consumption Of OpenStack Clouds},
  booktitle = {2014 IEEE 4th BDCloud},
  year = {2014},
  pages = {696--702},
  doi = {10.1109/BDCloud.2014.105}
}
Wang Y, Yang R, Wo T, Jiang W and Hu C (2014), "Improving utilization through dynamic VM resource allocation in hybrid cloud environment", In 2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)., December, 2014. , pp. 241-248.
Abstract: Virtualization is one of the most fascinating techniques because it can facilitate the infrastructure management and provide isolated execution for running workloads. Despite the benefits gained from virtualization and resource sharing, improved resource utilization is still far from settled due to the dynamic resource requirements and the widely-used over-provision strategy for guaranteed QoS. Additionally, with the emerging demands for big data analytic, how to effectively manage hybrid workloads such as traditional batch task and long-running virtual machine (VM) service needs to be dealt with. In this paper, we propose a system to combine long-running VM service with typical batch workload like MapReduce. The objectives are to improve the holistic cluster utilization through dynamic resource adjustment mechanism for VM without violating other batch workload executions. Furthermore, VM migration is utilized to ensure high availability and avoid potential performance degradation. The experimental results reveal that the dynamically allocated memory is close to the real usage with only 10% estimation margin, and the performance impact on VM and MapReduce jobs are both within 1%. Additionally, at most 50% increment of resource utilization could be achieved. We believe that these findings are in the right direction to solving workload consolidation issues in hybrid computing environments.
BibTeX:
@inproceedings{wang-improving:2014,
  author = {Wang, Y. and Yang, R. and Wo, T. and Jiang, W. and Hu, C.},
  title = {Improving utilization through dynamic VM resource allocation in hybrid cloud environment},
  booktitle = {2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)},
  year = {2014},
  pages = {241--248},
  doi = {10.1109/PADSW.2014.7097814}
}
Hoogendoorn B, Guerra D and Zwan PVD (2014), "What Drives Environmental Practices Of SMEs?", Small Business Economics., November, 2014. Vol. 44(4), pp. 759-781.
Abstract: The objective of this paper is to develop a better understanding of what drives small and medium-sized enterprises (SMEs) to engage in environmental practices, and whether the drivers differ across types of practices. Two types of environmental practices are distinguished: practices related to production processes (greening processes) and practices related to products and services (greening product and service offerings). Despite a growing literature on socially responsible behavior of large firms, the role of SMEs remains underexposed. This neglect is remarkable given the substantial impact of SMEs on the economy and the natural environment. By using unique data for almost 8,000 SMEs across 12 sectors in 36 countries, we study the influence of firm characteristics on SMEs’ environmental behavior. Our results suggest that different characteristics have dissimilar influences on both types of environmental practices, such as the type of customers served. Stringent environmental legislation encourages firms to actively take on environmental activities, but only in the case of green products and services. Moreover, the dominant idea that small firms are reluctant to invest in environmental practices is clearly more nuanced: firm size matters most for engagement in greening processes. Finally, SMEs active in tangible sectors and that receive financial support are more involved in either type of environmental practices.
BibTeX:
@article{hoogendoorn-what:2014,
  author = {Hoogendoorn, Brigitte and Guerra, Daniela and Zwan, Peter van der},
  title = {What Drives Environmental Practices Of SMEs?},
  journal = {Small Business Economics},
  year = {2014},
  volume = {44},
  number = {4},
  pages = {759--781},
  doi = {10.1007/s11187-014-9618-9}
}
Nanduri R, Kakadia D and Varma V (2014), "Energy and SLA aware VM Scheduling", arXiv:1411.6114 [cs]., November, 2014.
Abstract: With the advancement of Cloud Computing over the past few years, there has been a massive shift from traditional data centers to cloud enabled data centers. The enterprises with cloud data centers are focusing their attention on energy savings through effective utilization of resources. In this work, we propose algorithms which try to minimize the energy consumption in the data center duly maintaining the SLA guarantees. The algorithms try to utilize least number of physical machines in the data center by dynamically rebalancing the physical machines based on their resource utilization. The algorithms also perform an optimal consolidation of virtual machines on a physical machine, minimizing SLA violations. In extensive simulation, our algorithms achieve savings of about 21% in terms of energy consumption and in terms of maintaining the SLAs, it performs 60% better than Single Threshold algorithm.
BibTeX:
@article{nanduri-energy:2014,
  author = {Nanduri, Radheshyam and Kakadia, Dharmesh and Varma, Vasudeva},
  title = {Energy and SLA aware VM Scheduling},
  journal = {arXiv:1411.6114 [cs]},
  year = {2014},
  note = {arXiv: 1411.6114},
  url = {http://arxiv.org/abs/1411.6114}
}
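Work in this area, including threshold-based schedulers like the one summarized above, commonly rests on a linear server power model; the expression below is that common modelling assumption, not a formula taken from the paper:

P(u) = P_{\text{idle}} + \left(P_{\text{max}} - P_{\text{idle}}\right) u, \qquad 0 \le u \le 1

where u is CPU utilization. Because P_idle is typically a large fraction of P_max, packing VMs onto fewer hosts and putting the emptied hosts to sleep removes idle-power terms that would otherwise be paid on lightly loaded machines, which is the intuition behind the reported energy savings.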
Villars RL and Shirer M (2014), "IDC Finds Growth, Consolidation, And Changing Ownership Patterns In Worldwide Datacenter Forecast". November, 2014.
Abstract: A new forecast from International Data Corporation (IDC) shows that the transition to the 3rd Platform is having a direct impact on datacenter construction and remodeling. IDC expects the total number of datacenters (all types) deployed worldwide will peak at 8.6 million in 2017 and then begin to decline slowly. This shift will be triggered by a decline in internal datacenter server rooms starting in 2016 and internal server closets starting in 2017. All other datacenter categories will continue to grow throughout the forecast period, with the number of service provider datacenters increasing much faster. Despite a decline in the number of datacenters, total worldwide datacenter space will continue to increase, growing from 1.58 billion square feet in 2013 to 1.94 billion square feet in 2018.
BibTeX:
@misc{villars-idc:2014,
  author = {Villars, Richard L. and Shirer, Michael},
  title = {IDC Finds Growth, Consolidation, And Changing Ownership Patterns In Worldwide Datacenter Forecast},
  journal = {www.idc.com},
  year = {2014},
  url = {http://www.idc.com/getdoc.jsp?containerId=prUS25237514}
}
An K, Shekhar S, Caglar F, Gokhale A and Sastry S (2014), "A cloud middleware for assuring performance and high availability of soft real-time applications", Journal of Systems Architecture., October, 2014. Vol. 60(9), pp. 757-769.
Abstract: Applications are increasingly being deployed in the cloud due to benefits stemming from economy of scale, scalability, flexibility and utility-based pricing model. Although most cloud-based applications have hitherto been enterprise-style, there is an emerging need for hosting real-time streaming applications in the cloud that demand both high availability and low latency. Contemporary cloud computing research has seldom focused on solutions that provide both high availability and real-time assurance to these applications in a way that also optimizes resource consumption in data centers, which is a key consideration for cloud providers. This paper makes three contributions to address this dual challenge. First, it describes an architecture for a fault-tolerant framework that can be used to automatically deploy replicas of virtual machines in data centers in a way that optimizes resources while assuring availability and responsiveness. Second, it describes the design of a pluggable framework within the fault-tolerant architecture that enables plugging in different placement algorithms for VM replica deployment. Third, it illustrates the design of a framework for real-time dissemination of resource utilization information using a real-time publish/subscribe framework, which is required by the replica selection and placement framework. Experimental results using a case study that involves a specific replica placement algorithm are presented to evaluate the effectiveness of our architecture.
BibTeX:
@article{an-cloud:2014,
  author = {An, Kyoungho and Shekhar, Shashank and Caglar, Faruk and Gokhale, Aniruddha and Sastry, Shivakumar},
  title = {A cloud middleware for assuring performance and high availability of soft real-time applications},
  journal = {Journal of Systems Architecture},
  year = {2014},
  volume = {60},
  number = {9},
  pages = {757--769},
  url = {http://www.sciencedirect.com/science/article/pii/S1383762114000253},
  doi = {10.1016/j.sysarc.2014.01.009}
}
Mescheryakov S, Shchemelinin D and Efimov V (2014), "Adaptive Control Of Cloud Computing Resources In The Internet Telecommunication Multiservice System", In 2014 6th International Congress On Ultra Modern Telecommunications And Control Systems And Workshops (ICUMT)., October, 2014. , pp. 287-293.
Abstract: In the last decade, Internet telecommunication companies have been growing rapidly and are now based on cloud computing environments. Management of a big distributed production infrastructure with multiple business services requires a centralized control system. This paper describes how the Zabbix enterprise-class monitoring system can be used as an adaptive solution for real-time control of cloud computing resources, auto-detection of critical anomalies in advance and, when possible, auto-restoration of production services using a predefined workaround procedure. Real-world company examples are provided.
BibTeX:
@inproceedings{mescheryakov-adaptive:2014,
  author = {Mescheryakov, S. and Shchemelinin, D. and Efimov, V.},
  title = {Adaptive Control Of Cloud Computing Resources In The Internet Telecommunication Multiservice System},
  booktitle = {2014 6th International Congress On Ultra Modern Telecommunications And Control Systems And Workshops (ICUMT)},
  year = {2014},
  pages = {287--293},
  doi = {10.1109/ICUMT.2014.7002117}
}
Parziale L, Lasmayous G, Pattabhiraman MS, Reed K, Zhang JX and Redbooks IBM (2014), "End-to-End High Availability Solution for System z from a Linux Perspective", October, 2014. IBM Redbooks.
Abstract: As Linux on System z becomes more prevalent and mainstream in the industry, the need for it to deliver higher levels of availability is increasing. This IBM Redbooks publication starts with an explanation of high availability (HA) fundamentals such as HA concepts and terminology. It continues with a discussion of why a business needs to consider an HA solution and then explains how to determine your business single points of failure. We outline the components of a high availability solution and describe these components. Then we provide some architectural scenarios and demonstrate how to plan and decide an implementation of an end-to-end HA solution, from Linux on System z database scenarios to z/OS, and include storage, network, z/VM, Linux, and middleware. This implementation includes the IBM Tivoli System Automation for Multiplatforms (TSA MP), which monitors and automates applications distributed across Linux, AIX®, and z/OS® operating systems, as well as a GDPS based solution. It includes the planning for an end-to-end scenario, considering Linux on System z, z/VM, and z/OS operating environments, and the middleware used. The TSA MP implements HA for infrastructure, network, operating systems, and applications across multiple platforms and is compared to a Linux HA implementation based on open source Linux-HA, which is Linux only.
BibTeX:
@book{parziale-end-end:2014,
  author = {Parziale, Lydia and Lasmayous, Guillaume and Pattabhiraman, Manoj S. and Reed, Karen and Zhang, Jing Xin and Redbooks, I. B. M.},
  title = {End-to-End High Availability Solution for System z from a Linux Perspective},
  publisher = {IBM Redbooks},
  year = {2014}
}
Wolski R and Brevik J (2014), "Using Parametric Models to Represent Private Cloud Workloads", IEEE Transactions on Services Computing., October, 2014. Vol. 7(4), pp. 714-725.
Abstract: Cloud computing has become a popular metaphor for dynamic and secure self-service access to computational and storage capabilities. In this study, we analyze and model workloads gathered from enterprise-operated commercial private clouds that implement “Infrastructure as a Service.” Our results show that 3-phase hyperexponential distributions fit using the Estimation Maximization (E-M) algorithm capture workload attributes accurately. In addition, these models of individual attributes compose to produce estimates of overall cloud performance that our results verify to be accurate. As an early study of commercial enterprise private clouds, this work provides guidance to those researching, designing, or maintaining such installations. In particular, the cloud workloads under study do not exhibit “heavy-tailed” distributional properties in the same way that “bare metal” operating systems do, potentially leading to different design and engineering tradeoffs.
BibTeX:
@article{wolski-using:2014,
  author = {Wolski, R. and Brevik, J.},
  title = {Using Parametric Models to Represent Private Cloud Workloads},
  journal = {IEEE Transactions on Services Computing},
  year = {2014},
  volume = {7},
  number = {4},
  pages = {714--725},
  doi = {10.1109/TSC.2013.48}
}
Brunelli D, Minakov I, Passerone R and Rossi M (2014), "POVOMON: An Ad-hoc Wireless Sensor Network for indoor environmental monitoring", In 2014 IEEE Workshop on Environmental, Energy, and Structural Monitoring Systems Proceedings., September, 2014. , pp. 1-6.
Abstract: Wireless Sensor Networks (WSN) are a versatile technology that offers the ability to monitor real-world phenomena in detail and at large scale in scenarios where wired infrastructures are inapplicable or expensive. In this paper we present an ad-hoc WSN deployment for indoor environmental quality monitoring in office buildings. The indoor environmental quality and balance between inhabitant comfort level and power demands are the main objectives of this network. The presented system consists of 19 sensor devices continuously measuring vibration, temperature, humidity, light, and carbon dioxide levels in working areas. The power load of the building is measured by dedicated current sensor devices. Preliminary laboratory tests and data sets collected during 4 months of real world operation show that our system provides an accurate monitoring of indoor environmental parameters delivering high data reliability with an estimated lifetime exceeding 1.5 years, without the gas sensors. The paper presents the HW/SW architecture, the network infrastructure of the deployment and analyzes real measurement data.
BibTeX:
@inproceedings{brunelli-povomon:2014,
  author = {Brunelli, D. and Minakov, I. and Passerone, R. and Rossi, M.},
  title = {POVOMON: An Ad-hoc Wireless Sensor Network for indoor environmental monitoring},
  booktitle = {2014 IEEE Workshop on Environmental, Energy, and Structural Monitoring Systems Proceedings},
  year = {2014},
  pages = {1--6},
  doi = {10.1109/EESMS.2014.6923287}
}
Pultz JE, Cappuccio DJ, Adams A, Silva FD, Mishra N, Cecci H and Kumar R (2014), "Magic Quadrant for Data Center Infrastructure Management Tools". Thesis at: Gartner Inc., September, 2014. (G00259286), pp. 17.
Abstract: Data center infrastructure management (DCIM) tools optimize data centers by monitoring and managing IT and facilities resources and energy consumption. Data center and facilities managers can use this Magic Quadrant to identify DCIM technology providers and determine which meet their prioritized needs.
BibTeX:
@techreport{pultz-magic:2014,
  author = {Pultz, Jay E. and Cappuccio, David J. and Adams, April and Silva, Federico De and Mishra, Naveen and Cecci, Henrique and Kumar, Rakesh},
  title = {Magic Quadrant for Data Center Infrastructure Management Tools},
  school = {Gartner Inc},
  year = {2014},
  number = {G00259286},
  pages = {17}
}
Rotem E, Weisser UC, Mendelson A, Yassin A and Ginosar R (2014), "Energy Management of Highly Dynamic Server Workloads in an Heterogeneous Data Center", In 2014 24th International Workshop on Power and Timing Modeling, Optimization and Simulation (PATMOS)., September, 2014. , pp. 1-5.
Abstract: We propose a hybrid management model to address heterogeneous data center energy efficiency with highly dynamic workload. A central dispatch and control algorithm with distributed system energy management was implemented and validated on a real processor and system. We demonstrate up to 20% energy savings (11% average) without compromising quality of service. An additional 5% average energy savings was achieved by exploiting system heterogeneity.
BibTeX:
@inproceedings{rotem-energy:2014,
  author = {Rotem, E. and Weisser, U. C. and Mendelson, A. and Yassin, A. and Ginosar, R.},
  title = {Energy Management of Highly Dynamic Server Workloads in an Heterogeneous Data Center},
  booktitle = {2014 24th International Workshop on Power and Timing Modeling, Optimization and Simulation (PATMOS)},
  year = {2014},
  pages = {1--5},
  doi = {10.1109/PATMOS.2014.6951868}
}
Ferdaus MH, Murshed M, Calheiros RN and Buyya R (2014), "Virtual Machine Consolidation in Cloud Data Centers Using ACO Metaheuristic", In Euro-Par 2014 Parallel Processing., aug, 2014. , pp. 306-317. Springer, Cham.
Abstract: In this paper, we propose the AVVMC VM consolidation scheme that focuses on balanced resource utilization of servers across different computing resources (CPU, memory, and network I/O) with the goal of minimizing power consumption and resource wastage. Since the VM consolidation problem is strictly NP-hard and computationally infeasible for large data centers, we propose adaptation and integration of the Ant Colony Optimization (ACO) metaheuristic with balanced usage of computing resources based on vector algebra. Our simulation results show that AVVMC outperforms existing methods and achieves improvement in both energy consumption and resource wastage reduction.
BibTeX:
@inproceedings{ferdaus-virtual:2014,
  author = {Ferdaus, Md Hasanul and Murshed, Manzur and Calheiros, Rodrigo N. and Buyya, Rajkumar},
  title = {Virtual Machine Consolidation in Cloud Data Centers Using ACO Metaheuristic},
  booktitle = {Euro-Par 2014 Parallel Processing},
  publisher = {Springer, Cham},
  year = {2014},
  pages = {306--317},
  url = {https://link.springer.com/chapter/10.1007/978-3-319-09873-9_26},
  doi = {10.1007/978-3-319-09873-9_26}
}
Thibodeau P (2014), "Data Centers Are the New Polluters". August, 2014.
Abstract: U.S. data centers use more electricity than they need, a new report finds; IT managers are too cautious about managing power, and businesses are unwilling to invest in energy conservation.
BibTeX:
@misc{thibodeau-data:2014,
  author = {Thibodeau, Patrick},
  title = {Data Centers Are the New Polluters},
  journal = {Computerworld},
  year = {2014},
  url = {http://www.computerworld.com/article/2598562/data-center/data-centers-are-the-new-polluters.html}
}
Bermbach D (2014), "Benchmarking, Consistency, Distributed Database Management Systems, Distributed Systems, Eventual Consistency", July, 2014. KIT Scientific Publishing.
BibTeX:
@book{david-benchmarking:2014,
  author = {Bermbach, David},
  title = {Benchmarking, Consistency, Distributed Database Management Systems, Distributed Systems, Eventual Consistency},
  publisher = {KIT Scientific Publishing},
  year = {2014}
}
Mahdavi R (2014), "Data Center Energy Efficiency Measurement Assessment Kit Guide and Specification". Thesis at: U.S. Department of Energy, Lawrence Berkeley National Laboratory, July, 2014. , pp. 20.
Abstract: The purpose of this document is to empower data center owners and operators with information on the importance of energy assessments and to provide structured guidance for conducting them. Additionally, this guide covers how a portable and temporary wireless mesh assessment kit can be used to speed up the process and reduce the costs of a data center energy use assessment and overcome the issues with respect to shutdowns. This kit is suitable only for data centers with air-cooled IT equipment.
BibTeX:
@techreport{mahdavi-data:2014,
  author = {Mahdavi, Rod},
  title = {Data Center Energy Efficiency Measurement Assessment Kit Guide and Specification},
  school = {U.S. Department of Energy},
  year = {2014},
  pages = {20},
  url = {https://datacenters.lbl.gov/resources/data-center-energy-efficiency}
}
Caglar F, Shekhar S and Gokhale A (2014), "iPlace: An Intelligent and Tunable Power- and Performance-Aware Virtual Machine Placement Technique for Cloud-Based Real-Time Applications", In 2014 IEEE 17th International Symposium on Object/Component/Service-Oriented Real-Time Distributed Computing., June, 2014. , pp. 48-55.
Abstract: Power and performance tradeoffs are critical and challenging issues faced by cloud service providers (CSPs) while managing their data centers. On the one hand, CSPs strive to reduce power consumption of their data centers to not only decrease their energy costs but to also reduce adverse impact on the environment. On the other hand, CSPs must deliver performance expected by the applications hosted in their cloud in accordance with predefined Service Level Agreements (SLAs). Not doing so will lead to loss of customers and thereby major revenue losses for the CSPs. Addressing these dual set of challenges is hard for the CSPs because power management and performance assurance are conflicting objectives, particularly in the context of multi-tenant cloud systems where multiple virtual machines (VMs) may be hosted on a single physical server. The problem becomes even harder when real-time applications are hosted in these VMs. To address these challenges and make appropriate tradeoffs, we present iPlace, which is an intelligent and tunable power- and performance-aware VM placement middleware. The placement strategy is based on a two-level artificial neural network which predicts (1) CPU usage at the first level, and (2) power consumption and performance of a host machine at the second level that uses the predicted CPU usage. The efficacy of iPlace is evaluated in the context of a VM consolidation algorithm that is applied to running virtual machines and host machines in a private cloud.
BibTeX:
@inproceedings{caglar-iplace::2014,
  author = {Caglar, F. and Shekhar, S. and Gokhale, A.},
  title = {iPlace: An Intelligent and Tunable Power- and Performance-Aware Virtual Machine Placement Technique for Cloud-Based Real-Time Applications},
  booktitle = {2014 IEEE 17th International Symposium on Object/Component/Service-Oriented Real-Time Distributed Computing},
  year = {2014},
  pages = {48--55},
  doi = {10.1109/ISORC.2014.35}
}
Bachar Y and Simpkins A (2014), "Introducing ``Wedge'' and ``FBOSS'', the Next Steps Toward a Disaggregated Network". June, 2014.
Abstract: We’re big believers in the value of disaggregation -- of breaking down traditional data center technologies into their core components so we can build new systems that are more flexible, more scalable, and more efficient. This approach has guided Facebook from the beginning, as we’ve grown and expanded our infrastructure to connect more than 1.28 billion people around the world.
BibTeX:
@misc{facebook-code:2014,
  author = {Bachar, Yuval and Simpkins, Adam},
  title = {Introducing ``Wedge'' and ``FBOSS'', the Next Steps Toward a Disaggregated Network},
  journal = {Facebook Code},
  year = {2014}
}
Marko K (2014), "Facebook Hastens the Era of Open Source Hardware". June, 2014.
Abstract: If necessity is the mother of invention, then the need for cheap, efficient, compact servers and network equipment has made Facebook an innovator of gear optimized for cloud data centers and workloads. Its leadership in the development of standard, hyper-scale hardware using interchangeable commodity components led to the Open [...]
BibTeX:
@misc{forbes-facebook:2014,
  author = {Marko, Kurt},
  title = {Facebook Hastens the Era of Open Source Hardware},
  journal = {Forbes},
  year = {2014}
}
Unuvar M, Doganata Y, Steinder M, Tantawi A and Tosi S (2014), "A Predictive Method for Identifying Optimum Cloud Availability Zones", In 2014 IEEE 7th International Conference on Cloud Computing., jun, 2014. , pp. 72-79.
Abstract: Cloud service providers enable enterprises with the ability to place their business applications into availability zones across multiple locations worldwide. While this capability helps achieve higher availability with smaller failure rates, business applications deployed across these independent zones may experience different Quality of Service (QoS) due to heterogeneous physical infrastructures. Since the perceived QoS against specific requirements are not usually advertised by cloud providers, selecting an availability zone that would best satisfy the user requirements is a challenge. In this paper, we introduce a predictive approach to identify the cloud availability zone that maximizes satisfaction of an incoming request against a set of requirements. The predictive models are built from historical usage data for each availability zone and are updated as the nature of the zones and requests change. Simulation results show that our method successfully predicts the unpublished zone behavior from historical data and identifies the availability zone that maximizes user satisfaction against specific requirements.
BibTeX:
@inproceedings{unuvar-predictive:2014,
  author = {Unuvar, M. and Doganata, Y. and Steinder, M. and Tantawi, A. and Tosi, S.},
  title = {A Predictive Method for Identifying Optimum Cloud Availability Zones},
  booktitle = {2014 IEEE 7th International Conference on Cloud Computing},
  year = {2014},
  pages = {72--79},
  doi = {10.1109/CLOUD.2014.20}
}
Muchalski F and Maziero C (2014), "Alocação de Máquinas Virtuais em Ambientes de Computação em Nuvem Considerando o Compartilhamento de Memória", In XII Workshop de Computação em Clouds e Aplicações - WCGA 2014. Florianópolis/SC--Brazil, may, 2014. , pp. 81-92. XII WCGA 2014.
Abstract: In cloud computing environments it is important to keep the allocation of virtual machines to physical servers under control. A good allocation brings benefits such as reduced costs for hardware, power, and cooling, and also improves the quality of service. Recent hypervisors implement mechanisms to reduce RAM consumption by sharing identical pages between virtual machines. This paper presents a new algorithm for virtual machine allocation that seeks the balanced use of CPU, memory, disk, and network and, in addition, considers the potential for sharing memory among virtual machines. Simulations on three distinct scenarios demonstrate that it is superior to the standard approach in the balanced use of resources and that, considering shared memory, there was an appreciable gain in the availability of this resource at the end of the allocations.
BibTeX:
@inproceedings{muchalski-alocacao:2014,
  author = {Muchalski, Fernando and Maziero, Carlos},
  title = {Alocação de Máquinas Virtuais em Ambientes de Computação em Nuvem Considerando o Compartilhamento de Memória},
  booktitle = {XII Workshop de Computação em Clouds e Aplicações - WCGA 2014},
  publisher = {XII WCGA 2014},
  year = {2014},
  pages = {81--92}
}
Marik O and Zitta S (2014), "Comparative Analysis of Monitoring System for Data Networks", In 2014 International Conference on Multimedia Computing and Systems (ICMCS)., April, 2014. , pp. 563-568.
Abstract: The document is focused on a comparative analysis of monitoring systems for data networks and their subsequent application to a data network of smaller range. In the text, the author describes the methodology for comparing monitoring systems and gives a description of their implementation and testing. In the introduction the document deals with the theory of design patterns for deployment of monitoring systems.
BibTeX:
@inproceedings{marik-comparative:2014,
  author = {Marik, O. and Zitta, S.},
  title = {Comparative Analysis of Monitoring System for Data Networks},
  booktitle = {2014 International Conference on Multimedia Computing and Systems (ICMCS)},
  year = {2014},
  pages = {563--568},
  doi = {10.1109/ICMCS.2014.6911307}
}
Mittal S (2014), "Power Management Techniques for Data Centers: A Survey", CoRR., April, 2014.
Abstract: With the growing use of the Internet and the exponential growth in the amount of data to be stored and processed (known as 'big data'), the size of data centers has greatly increased. This, however, has resulted in a significant increase in the power consumption of data centers. For this reason, managing the power consumption of data centers has become essential. In this paper, we highlight the need for achieving energy efficiency in data centers and survey several recent architectural techniques designed for power management of data centers. We also present a classification of these techniques based on their characteristics. This paper aims to provide insights into the techniques for improving energy efficiency of data centers and encourage designers to invent novel solutions for managing the large power dissipation of data centers.
BibTeX:
@article{mittal-power:2014,
  author = {Mittal, Sparsh},
  title = {Power Management Techniques for Data Centers: A Survey},
  journal = {CoRR},
  year = {2014},
  note = {arXiv: 1404.6681}
}
Turner V, Reinsel D, Gantz JF and Minton S (2014), "The Digital Universe of Opportunities: Rich Data and the Increasing Value of the Internet of Things". Thesis at: EMC and IDC. USA, April, 2014. (1672), pp. 10.
BibTeX:
@techreport{turner-digital:2014,
  author = {Turner, Vernon and Reinsel, David and Gantz, John F. and Minton, Stephen},
  title = {The Digital Universe of Opportunities: Rich Data and the Increasing Value of the Internet of Things},
  school = {EMC and IDC},
  year = {2014},
  number = {1672},
  pages = {10},
  url = {http://www.emc.com/leadership/digital-universe/2014iview/executive-summary.htm}
}
Alahmadi A, Alnowiser A, Zhu MM, Che D and Ghodous P (2014), "Enhanced First-Fit Decreasing Algorithm for Energy-Aware Job Scheduling in Cloud", In 2014 International Conference on Computational Science and Computational Intelligence., March, 2014. Vol. 2, pp. 69-74.
Abstract: With the emerging of many data centers around the globe, heavy loads of large-scale commercial and scientific applications executed in the cloud call for efficient cloud resource management strategies to save energy without compromising the performance and system throughput. According to the statistics from the Data Centre Dynamic (DCD) organization, the expected energy consumption by computer servers would increase by 19% in 2013 compared with the previous year. Such trend may continue for many years. Moreover, the estimated energy consumption of computers in the U.S. was about 2% out of the total electricity consumption in 2010, which makes IT industry the second pollution contributor after aviation. In this paper, a novel approach for scheduling, sharing and migrating Virtual Machines (VMs) for a bag of cloud tasks is designed and developed to reduce energy consumption with guaranteed certain execution time and high system throughput. This approach is derived from an Enhanced First Fit Decreasing (EFFD) algorithm combined with our VM reuse strategy. Furthermore, virtual machine migration method is introduced to dynamically monitor the cloud situation for necessary migration. Our simulation results using Cloud Report show that EFFD with our VM reuse strategy gains higher resource utilization rate and lower energy consumption than Greedy, Round Robin (RR) and FDD without VM reuse.
BibTeX:
@inproceedings{alahmadi-enhanced:2014,
  author = {Alahmadi, A. and Alnowiser, A. and Zhu, M. M. and Che, D. and Ghodous, P.},
  title = {Enhanced First-Fit Decreasing Algorithm for Energy-Aware Job Scheduling in Cloud},
  booktitle = {2014 International Conference on Computational Science and Computational Intelligence},
  year = {2014},
  volume = {2},
  pages = {69--74},
  doi = {10.1109/CSCI.2014.97}
}
Cérin C, Coti C, Delort P, Diaz F, Gagnaire M, Mijic M, Gaumer Q, Guillaume N, Lous JL, Lubiarz S, Raffaelli J-L, Shiozaki K, Schauer H, Smets J-P, Séguin L and Ville A (2014), "Downtime Statistics of Current Cloud Solutions". Thesis at: International Working Group on Cloud Computing Resiliency (IWGCR). France, March, 2014. (Rev 03-2014), pp. 5.
BibTeX:
@techreport{cerin-downtime:2014,
  author = {Cérin, Christophe and Coti, Camille and Delort, Pierre and Diaz, Felipe and Gagnaire, Maurice and Mijic, Marija and Gaumer, Quentin and Guillaume, Nicolas and Lous, Jonathan Le and Lubiarz, Stephane and Raffaelli, Jean-Luc and Shiozaki, Kazuhiko and Schauer, Hervé and Smets, Jean-Paul and Séguin, Laurent and Ville, Alexandrine},
  title = {Downtime Statistics of Current Cloud Solutions},
  school = {International Working Group on Cloud Computing Resiliency (IWGCR)},
  year = {2014},
  number = {Rev 03-2014},
  pages = {5},
  url = {http://iwgcr.org/wp-content/uploads/2014/03/downtime-statistics-current-1.3.pdf}
}
Wiboonrat M (2014), "Data Center Infrastructure Management WLAN Networks for Monitoring and Controlling Systems", In 2014 ICOIN., February, 2014. , pp. 226-231.
Abstract: Constraints of space, network, power, and cooling, along with the vast complexity of managing a large data center, have given rise to a new class of tools with integrated processes called data center infrastructure management (DCIM). The DCIM model provides data center operating provision in terms of system connectivity and interrelationship among system operations to support data center operations managers. The benefits of wireless technology are reduced investment in terms of time to implement, cost of installation labor and material, and flexibility of mobility. Moreover, WLAN and VLAN are deployed to eliminate the limitations of cabling infrastructure, e.g. fast installation, lower costs per switching port and cable wiring, limited working space, reduced risk during installation, etc. This model can also reduce cabling investment and network operating costs, control energy costs, improve energy efficiency, and increase operational efficiency.
BibTeX:
@inproceedings{wiboonrat-data:2014,
  author = {Wiboonrat, M.},
  title = {Data Center Infrastructure Management WLAN Networks for Monitoring and Controlling Systems},
  booktitle = {2014 ICOIN},
  year = {2014},
  pages = {226--231},
  doi = {10.1109/ICOIN.2014.6799696}
}
Dai J, Ohadi MM, Das D and Pecht MG (2014), "Data Center Energy Flow and Efficiency", In Optimum Cooling of Data Centers., January, 2014. , pp. 9-30. Springer New York.
Abstract: Data centers form the backbone of information management in every sector of the economy, and their energy consumption has been of concern to governments and the telecom industry. This chapter introduces data center energy efficiency, including the main components and operating environments in data centers, as well as the standards, thermal guidelines, and metrics used to quantify the energy efficiency. This chapter also presents the major cooling methods used in the industry to improve energy efficiency. A case study is discussed in which the energy consumption of a medium-size primary data center at an academic campus is analyzed and compared with experimental measurements.
BibTeX:
@incollection{dai-data:2014,
  author = {Dai, Jun and Ohadi, Michael M. and Das, Diganta and Pecht, Michael G.},
  title = {Data Center Energy Flow and Efficiency},
  booktitle = {Optimum Cooling of Data Centers},
  publisher = {Springer New York},
  year = {2014},
  pages = {9--30}
}
Medina V and García JM (2014), "A Survey of Migration Mechanisms of Virtual Machines", ACM Comput. Surv.., January, 2014. Vol. 46(3), pp. 30:1-30:33.
Abstract: In the virtualization area, replication has been considered as a mechanism to provide high availability. A high-availability system should be active most of the time, and this is the reason that its design should consider almost zero downtime and a minimal human intervention if a recovery process is demanded. Several migration and replication mechanisms have been developed to provide high availability inside virtualized environments. In this article, a survey of migration mechanisms is reported. These approaches are classified in three main classes: process migration, memory migration, and suspend/resume migration.
BibTeX:
@article{medina-survey:2014,
  author = {Medina, Violeta and García, Juan Manuel},
  title = {A Survey of Migration Mechanisms of Virtual Machines},
  journal = {ACM Comput. Surv.},
  year = {2014},
  volume = {46},
  number = {3},
  pages = {30:1-30:33},
  url = {http://doi.acm.org/10.1145/2492705},
  doi = {10.1145/2492705}
}
Severance C (2014), "Massimo Banzi: Building Arduino", Computer., January, 2014. Vol. 47(1), pp. 11-12.
Abstract: Massimo Banzi describes the origins and evolution of the Arduino microcontroller. The first Web extra at http://youtu.be/0vabvq2ti50 is a video interview in which author Charles Severance speaks with Massimo Banzi about the origins and evolution of the Arduino microcontroller. The second Web extra at http://youtu.be/n6wn1qlfeq is an audio recording of author Charles Severance reading his Computing Conversations column, in which he discusses his interview with Massimo Banzi about the origins and evolution of the Arduino microcontroller.
BibTeX:
@article{severance-massimo:2014,
  author = {Severance, Charles},
  title = {Massimo Banzi: Building Arduino},
  journal = {Computer},
  year = {2014},
  volume = {47},
  number = {1},
  pages = {11--12},
  doi = {10.1109/MC.2014.19}
}
ANSI/BICSI-002 (2014), "ANSI/BICSI 002-2014, Data Center Design and Implementation Best Practices". Thesis at: BICSI. USA (002-2014), pp. 500.
Abstract: Considered the foundation standard for data center design around the world, ANSI/BICSI 002-2014 continues its mission to provide requirements, guidelines and best practices applicable to any data center. Now featuring 500 pages of content, the 2014 edition of BICSI 002 focused on adding or expanding information on: the BICSI availability class structure for all major aspects of data centers; modular and container data centers; DCIM and building systems; DC power; hot and cold aisles; multi-data center architecture and data center service outsourcing; energy efficiency. While a focus was made on these and other recent developments, the international team of over 50 experts from all disciplines within data centers also reviewed the existing content to reflect changes and developments in the major data center markets.
BibTeX:
@techreport{ansi-bicsi:2014,
  author = {ANSI/BICSI-002},
  title = {ANSI/BICSI 002-2014, Data Center Design and Implementation Best Practices},
  school = {BICSI},
  year = {2014},
  number = {002-2014},
  pages = {500},
  url = {https://www.bicsi.org/book_details.aspx?Book=BICSI-002-CM-14-v5&d=0}
}
Brenner S, Garbers B and Kapitza R (2014), "Adaptive and scalable high availability for infrastructure clouds", In IFIP International Conference on Distributed Applications and Interoperable Systems. , pp. 16-30. Springer.
BibTeX:
@inproceedings{brenner-adaptive:2014,
  author = {Brenner, Stefan and Garbers, Benjamin and Kapitza, Rüdiger},
  title = {Adaptive and scalable high availability for infrastructure clouds},
  booktitle = {IFIP International Conference on Distributed Applications and Interoperable Systems},
  publisher = {Springer},
  year = {2014},
  pages = {16--30},
  url = {http://link.springer.com/chapter/10.1007/978-3-662-43352-2_2}
}
Camargo DS and Miers CC (2014), "Automação Climática em Sala de Servidores Utilizando Hardware Livre".
BibTeX:
@misc{camargo-automacao-latino:2014,
  author = {Camargo, Daniel Scheidemantel and Miers, Charles Christian},
  title = {Automação Climática em Sala de Servidores Utilizando Hardware Livre},
  journal = {Apresentação de Palestra, Latinoware/PR},
  year = {2014}
}
Chavan V and Kaveri PR (2014), "Clustered virtual machines for higher availability of resources with improved scalability in cloud computing", In 2014 First International Conference on Networks Soft Computing (ICNSC2014). , pp. 221-225.
Abstract: Cloud computing, a big pool of resources, dynamically reconfigures its resources as per user requirements in real time. The cloud environment mostly works with virtualization, which is a key consideration for providing virtual machines as a service to users. It is difficult to manage virtual machines and to deploy them on any data center. In the cloud environment users expect better services from the vendors in order to improve resource utilization. Factors in the cloud environment such as scalability and availability are considered for better outcomes for users. This not only saves time, but also improves cost utilization. This paper proposes an architecture based on clustering virtual machines in datacenters for higher availability of resources with improved scalability. Clustering helps virtual machines to reconfigure and eases scheduling. Resource sharing in the cloud is optimized and users get maximized results. Further, the paper presents a mathematical model for explaining the concepts of the proposed system. The system is modeled using simulation tools and further elaborated in the paper.
BibTeX:
@inproceedings{chavan-clustered:2014,
  author = {Chavan, V. and Kaveri, P. R.},
  title = {Clustered virtual machines for higher availability of resources with improved scalability in cloud computing},
  booktitle = {2014 First International Conference on Networks Soft Computing (ICNSC2014)},
  year = {2014},
  pages = {221--225},
  doi = {10.1109/CNSC.2014.6906707}
}
Cheung H, Greenberg S, Mahdavi R, Brown R and Tscudi W (2014), "Energy Efficiency in Small Server Rooms: Field Surveys and Findings", In 2014 ACEEE Summer Study on Energy Efficiency in Buildings. Pacific Grove, California.
Abstract: Fifty-seven percent of US servers are housed in server closets, server rooms, and localized data centers, in what are commonly referred to as small server rooms, which comprise 99% of all server spaces in the US. While many mid-tier and enterprise-class data centers are owned by large corporations that consider energy efficiency a goal to minimize business operating costs, small server rooms typically are not similarly motivated. They are characterized by decentralized ownership and management and come in many configurations, which creates a unique set of efficiency challenges. To develop energy efficiency strategies for these spaces, we surveyed 30 small server rooms across eight institutions, and selected four of them for detailed assessments. The four rooms had power usage effectiveness (PUE) values ranging from 1.5 to 2.1. Energy saving opportunities ranged from no- to low-cost measures such as raising cooling set points and better airflow management, to more involved but cost-effective measures including server consolidation and virtualization, and dedicated cooling with economizers. We found that inefficiencies mainly resulted from organizational rather than technical issues. Because of the inherent space and resource limitations, the most effective measure is to operate servers through energy-efficient cloud-based services or well-managed larger data centers, rather than server rooms. Backup power requirements, and IT and cooling efficiency, should be evaluated to minimize energy waste in the server space. Utility programs are instrumental in raising awareness and spreading technical knowledge on server operation, and in the implementation of energy efficiency measures in small server rooms.
BibTeX:
@inproceedings{cheung-energy:2014,
  author = {Cheung, H. Y. and Greenberg, Steve and Mahdavi, Roozbeh and Brown, Richard and Tscudi, William},
  title = {Energy Efficiency in Small Server Rooms: Field Surveys and Findings},
  booktitle = {2014 ACEEE Summer Study on Energy Efficiency in Buildings},
  year = {2014}
}
Dai J, Ohadi MM, Das D and Pecht MG (2014), "Optimum Cooling of Data Centers: Application of Risk Assessment and Mitigation Techniques". New York, NY. Springer New York.
Abstract: This book describes the use of free air cooling to improve the efficiency of, and cooling of, equipment for use in telecom infrastructures. Discussed at length is the cooling of communication installation rooms such as data centers or base stations, and this is intended as a valuable tool for the people designing and manufacturing key parts of communication networks. This book provides an introduction to current cooling methods used for energy reduction, and also compares present cooling methods in use in the field. The qualification methods and standard reliability assessments are reviewed, and their inability to assess the risks of free air cooling is discussed. The method of identifying the risks associated with free air cooling on equipment performance and reliability is introduced. A novel method of assessment for free air cooling is also proposed that utilizes prognostics and health management (PHM). This book also: describes how the implementation of free air cooling can save energy for cooling within the telecommunications infrastructure; analyzes the potential risks and failure mechanisms possible in the implementation of free air cooling, which benefits manufacturers and equipment designers; presents prognostics-based assessments to identify and mitigate the risks of telecommunications equipment under free air cooling conditions, which can provide early warning of equipment failures at the operation stage without disturbing the data centers' service. Optimum Cooling of Data Centers is an ideal book for researchers and engineers interested in designing and manufacturing equipment for use in telecom infrastructures.
BibTeX:
@book{dai-optimum:2014,
  author = {Dai, Jun and Ohadi, Michael M. and Das, Diganta and Pecht, Michael G.},
  title = {Optimum Cooling of Data Centers: Application of Risk Assessment and Mitigation Techniques},
  publisher = {Springer New York},
  year = {2014},
  doi = {10.1007/978-1-4614-5602-5}
}
David M and Schmidt R (2014), "Impact of ASHRAE Environmental Classes on Data Centers", In 2014 IEEE ITHERM. , pp. 1092-1099.
Abstract: Data centers consume a significant amount of energy in the US and worldwide, much of which is consumed by the cooling infrastructure, particularly the chiller plant and computer room air conditioners and air handlers. To enable energy efficient data center designs, ASHRAE added two new IT environmental classes, A3 and A4, with associated allowable inlet air temperatures of 40°C and 45°C respectively. IT equipment that meets these new allowable environmental envelopes can operate in data centers with minimal refrigeration cooling and instead rely on ambient free cooling. In this paper we investigate the impact of allowing a data center to operate up to the A3 limit of 40°C on total data center energy use for 3 different types of servers in a chiller-less data center located in a variety of locations. The study finds that though facility power reduces as the demand for cold air reduces, the increase in IT power consumption, due to fan speed-up, can offset these savings and in some cases result in an overall increase in data center power. Thus the most energy efficient operating point is dependent on the specific energy use profiles of the infrastructure and the IT equipment. The higher allowable temperature can also result in higher failure rates and an increased risk of equipment or service loss due to data center cooling failures. This paper also presents a study on the potential for chiller elimination and chiller use reduction across the US, Europe and India by operating in the various ASHRAE envelopes. For wet, water-side economized data centers, A2 and A3 equipment is sufficient to almost completely remove the need for chillers in many geographic locations.
BibTeX:
@inproceedings{david-impact:2014,
  author = {David, M. P. and Schmidt, R. R.},
  title = {Impact of ASHRAE Environmental Classes on Data Centers},
  booktitle = {2014 IEEE ITHERM},
  year = {2014},
  pages = {1092--1099},
  doi = {10.1109/ITHERM.2014.6892403}
}
Delforge P (2014), "America's Data Centers Consuming and Wasting Growing Amounts of Energy".
Abstract: Data centers are one of the largest and fastest growing consumers of electricity in the United States. In 2013, U.S. data centers consumed an estimated 91 billion kilowatt-hours of electricity -- enough electricity to power all the households in New York City twice over -- and are on track to reach 140 billion kilowatt-hours by 2020.
BibTeX:
@misc{delforge-americas:2014,
  author = {Delforge, Pierre},
  title = {America's Data Centers Consuming and Wasting Growing Amounts of Energy},
  journal = {Natural Resources Defense Council -- NRDC},
  year = {2014},
  url = {http://www.nrdc.org/energy/data-center-efficiency-assessment.asp}
}
Dhurandher SK, Obaidat MS, Woungang I, Agarwal P, Gupta A and Gupta P (2014), "A Cluster-based Load Balancing Algorithm in Cloud Computing", In 2014 IEEE International Conference on Communications (ICC). , pp. 2921-2925.
Abstract: Workload and resource management are two essential functions provided at the service level of the distributed systems infrastructure. To improve the global throughput of these software environments, workloads have to be evenly scheduled among the available resources. To realize this goal, several load balancing strategies and algorithms have been proposed. Most of these strategies were developed assuming a homogeneous set of sites linked with homogeneous and fast networks. However, for computational grids, we must address some new issues, namely: heterogeneity, scalability and adaptability. In this paper, we propose a decentralized cluster-based algorithm which achieves dynamic load balancing in the cloud architecture. The proposed algorithm presents the following main features: (i) it supports heterogeneity, (ii) scalability, (iii) low network congestion and (iv) absence of any bottleneck node due to its decentralized nature. Simulation results using CloudSim show the performance analysis of the algorithm, supporting our claims about the load balancing achieved in the system.
BibTeX:
@inproceedings{dhurandher-cluster-based:2014,
  author = {Dhurandher, S. K. and Obaidat, M. S. and Woungang, I. and Agarwal, P. and Gupta, A. and Gupta, P.},
  title = {A Cluster-based Load Balancing Algorithm in Cloud Computing},
  booktitle = {2014 IEEE International Conference on Communications (ICC)},
  year = {2014},
  pages = {2921--2925},
  doi = {10.1109/ICC.2014.6883768}
}
Florence AP and Shanthi V (2014), "Energy Aware Load Balancing for Computational Cloud", In 2014 IEEE International Conference on Computational Intelligence and Computing Research (ICCIC). , pp. 1-3.
Abstract: Cloud computing is a novel technology which enables any resource as a service on demand. The cloud environment motivates highly dynamic resource provisioning; hence clients can scale up or scale down their requirements as per their demand. Load balancing is a very important and complex problem in the cloud environment because of the heterogeneity of the computing nodes. In order to realize the full potential of cloud computing it is vital to minimize energy consumption along with effective load balancing. The aim of the energy aware load balancing (EALB) model is to minimize energy consumption with load balancing. The EALB model classifies the incoming job request as either CPU bound or I/O bound according to its purpose and behaviour. These classification details are maintained in a table named the pattern history table (PHT) and organized as a hash table. One of the virtual machines (VMs) is selected dynamically based on a best fit allocation policy and the job is assigned to the victimized VM. From the pattern history table the job's nature is identified. Using a dynamic voltage frequency scaling (DVFS) scheme the selected VM's processor clock frequency is increased if the job is found to be CPU bound, else decreased (I/O bound). Thus, the EALB algorithm saves a considerable amount of energy and proves to be more efficient.
BibTeX:
@inproceedings{florence-energy:2014,
  author = {Florence, A. P. and Shanthi, V.},
  title = {Energy Aware Load Balancing for Computational Cloud},
  booktitle = {2014 IEEE International Conference on Computational Intelligence and Computing Research (ICCIC)},
  year = {2014},
  pages = {1--3},
  doi = {10.1109/ICCIC.2014.7238489}
}
Hossain S (2014), "Cloud computing terms, definitions, and taxonomy", In Cloud Technology: Concepts, Methodologies, Tools, and Applications. Vol. 1, pp. 25-49.
Abstract: Cloud computing has taken the IT industry by storm. It has ushered a new era of computing and IT delivery model. This chapter introduces terms and terminologies associated with cloud computing from a vendor neutral perspective. Readers are gradually introduced to cloud computing elements which pave the way for better understanding in later chapters. © 2015 by IGI Global. All rights reserved.
BibTeX:
@incollection{hossain-cloud:2014,
  author = {Hossain, S.},
  title = {Cloud computing terms, definitions, and taxonomy},
  booktitle = {Cloud Technology: Concepts, Methodologies, Tools, and Applications},
  year = {2014},
  volume = {1},
  pages = {25--49},
  note = {DOI: 10.4018/978-1-4666-6539-2.ch002}
}
Verge J (2014), "The Green Grid Unveils Energy Productivity Metric for Data Centers".
Abstract: The Green Grid has finally reached consensus on a metric more than five years in the making: data center energy productivity, or DCeP, is computed as useful work produced divided by total energy consumed by the data center. DCeP allows each user to define useful work as applicable to the user's business.
BibTeX:
@online{jason-green:2014,
  author = {Verge, Jason},
  title = {The Green Grid Unveils Energy Productivity Metric for Data Centers},
  year = {2014},
  url = {http://www.datacenterknowledge.com/archives/2014/03/20/green-grid-unveils-energy-productivity-metric-data-centers/}
}
Jeong S and Kim YW (2014), "A Holistic Investigation Method for Data Center Resource Efficiency", In 2014 International Conference on Information and Communication Technology Convergence (ICTC). , pp. 548-551.
Abstract: The rapid increase in energy consumption of data centers accelerates the development of metrics for measuring data center energy efficiency. In response to this demand, ISO/IEC JTC1/SC39 is developing the ISO/IEC 30134 series of standards defining multiple key performance indicators (KPIs) for measuring data center resource efficiency. However, there is another emerging demand to observe the overall trend of all KPIs in a single view and the inter-relationship among the KPIs. The main objective of this paper is to describe the general holistic investigation method and identify issues associated with various strategies. It then proposes a control chart-based holistic investigation method for resource efficiency evaluation and interrelationship identification of multiple data center KPIs. Through a SWOT analysis of the holistic investigation method presented in this paper, the usefulness and applicability of the holistic investigation method are discussed.
BibTeX:
@inproceedings{jeong-holistic:2014,
  author = {Jeong, S. and Kim, Y. W.},
  title = {A Holistic Investigation Method for Data Center Resource Efficiency},
  booktitle = {2014 International Conference on Information and Communication Technology Convergence (ICTC)},
  year = {2014},
  pages = {548--551},
  doi = {10.1109/ICTC.2014.6983207}
}
Dong J, Wang H, Li Y and Cheng S (2014), "Virtual Machine Scheduling for Improving Energy Efficiency in IaaS Cloud", China Communications. Vol. 11(3), pp. 1-12.
BibTeX:
@article{jiankang-virtual:2014,
  author = {Dong, Jiankang and Wang, Hongbo and Li, Yangyang and Cheng, Shiduan},
  title = {Virtual Machine Scheduling for Improving Energy Efficiency in IaaS Cloud},
  journal = {China Communications},
  year = {2014},
  volume = {11},
  number = {3},
  pages = {1--12},
  url = {http://ieeexplore.ieee.org/document/6825253/},
  doi = {10.1109/CC.2014.6825253}
}
Lunardi GL, Simões R and Frio RS (2014), "Green IT: An Analysis of the Main Benefits and Practices Used by Organizations", REAd. Revista Eletrônica de Administração (Porto Alegre). Vol. 20(1), pp. 1-30.
BibTeX:
@article{lunardi-green:2014,
  author = {Lunardi, Guilherme Lerch And Simões, Renata And Frio, Ricardo Saraiva},
  title = {Green IT: An Analysis of the Main Benefits and Practices Used by Organizations},
  journal = {REAd. Revista Eletrônica de Administração (Porto Alegre)},
  year = {2014},
  volume = {20},
  number = {1},
  pages = {1--30},
  url = {http://www.scielo.br/scielo.php?script=sci_abstract&pid=S1413-23112014000100001&lng=en&nrm=iso&tlng=pt},
  doi = {10.1590/S1413-23112014000100001}
}
Andrea M and Wallace B (2014), "Data Center Size and Density Standards". Thesis at: Data Center Institute Standards Endorsed. (DCISE-001), pp. 19.
Abstract: Under the guidance of AFCOM's Data Center Institute board, industry consultancy the Strategic Directions Group was commissioned to develop a standard set of facility size and density ratings to inform the development of regulatory obligations, such as building energy efficiency reporting, at the state, regional and local levels.
BibTeX:
@techreport{mike-data:2014,
  author = {Andrea, Mike and Wallace, Byron},
  title = {Data Center Size and Density Standards},
  school = {Data Center Institute Standards Endorsed},
  year = {2014},
  number = {DCISE-001},
  pages = {19}
}
Mogami S and Rodrigues S (2014), "Data Centers para Pequenas Empresas", Revista RTI. Vol. 167, pp. 20.
Abstract: http://www.arandanet.com.br/midiaonline/rti/2014/abril/index.html
BibTeX:
@article{mogami-rti:2014,
  author = {Mogami, Sandra And Rodrigues, Simone},
  title = {Data Centers para Pequenas Empresas},
  journal = {Revista RTI},
  year = {2014},
  volume = {167},
  pages = {20}
}
Mwila MK, Djouani K and Kurien A (2014), "Approach to Sensor Node Calibration for Efficient Localisation in Wireless Sensor Networks in Realistic Scenarios", Procedia Computer Science. Vol. 32, pp. 166-173.
Abstract: Localisation, or position determination, is one of the most important applications for wireless sensor networks. Numerous current techniques for localisation of sensor nodes use the received signal strength indicator (RSSI) from sensor nodes because of its simplicity and cost. Non-linearities in RSSI circuits, the antenna radiation pattern and path loss model parameter estimation may affect the accuracy of the localisation algorithm. Therefore, environmental characterisation of the basic mechanisms of radio propagation is a fundamental step toward the design of ranging and localisation algorithms able to work properly in realistic scenarios. Furthermore, positioning systems are migrating towards hybridisation, where data coming from heterogeneous technologies are fused to improve localisation accuracy and coverage. This paper presents an improved mathematical model for ranging using RSSI in realistic practical scenarios as well as measurement methodologies to use during calibration experiments in order to quantify each parameter involved in a localisation algorithm using a sensor data fusion approach.
BibTeX:
@article{mwila-approach:2014,
  author = {Mwila, Martin K. and Djouani, Karim and Kurien, Anish},
  title = {Approach to Sensor Node Calibration for Efficient Localisation in Wireless Sensor Networks in Realistic Scenarios},
  journal = {Procedia Computer Science},
  year = {2014},
  volume = {32},
  pages = {166--173},
  url = {http://www.sciencedirect.com/science/article/pii/S1877050914006115},
  doi = {10.1016/j.procs.2014.05.411}
}
Evangelista Netto J, Hernane Paulicena E, Amorim Silva R and Anzaloni A (2014), "Analysis of Energy Consumption Using HTTP and FTP Protocols Over IEEE 802.11", Latin America Transactions, IEEE (Revista IEEE America Latina). Vol. 12(4), pp. 668-674.
Abstract: This paper presents an overview of green computing and energy awareness in network communication. A measurement of energy efficiency and a comparison of the main network transfer protocols used on the Internet were performed. A tool named Energy Consumption Model was developed and inserted into the discrete event simulator OPNET Modeler to run the tests. An exhaustive analysis of consumption was performed during the data transfer phase with the HTTP and FTP protocols using different packet sizes. From the data obtained it was possible not only to quantify the energy consumed by the protocols, but also to perform a comparative analysis between them. The results suggest that the use of HTTP is more suitable than FTP. However, this advantage decreases proportionately for larger objects.
BibTeX:
@article{netto-analysis:2014,
  author = {Evangelista Netto, Joao and Hernane Paulicena, Edesio and Amorim Silva, Rafael and Anzaloni, Alessandro},
  title = {Analysis of Energy Consumption Using HTTP and FTP Protocols Over IEEE 802.11},
  journal = {Latin America Transactions, IEEE (Revista IEEE America Latina)},
  year = {2014},
  volume = {12},
  number = {4},
  pages = {668--674},
  doi = {10.1109/TLA.2014.6868868}
}
ODCA (2014), "Master Usage Model: Compute Infrastructure as a Service (CIaaS) Rev2.0". Thesis at: OPEN DATA CENTER ALLIANCE. , pp. 58.
BibTeX:
@techreport{odca-master:2014,
  author = {ODCA},
  title = {Master Usage Model: Compute Infrastructure as a Service (CIaaS) Rev2.0},
  school = {OPEN DATA CENTER ALLIANCE},
  year = {2014},
  pages = {58},
  url = {https://opendatacenteralliance.org/docs/compute_infrastructure_as_a_service_rev_2.pdf}
}
Ono MY (2014), "Indicadores de desempenho em Data Center". Thesis at: Escola Politécnica da Universidade de São Paulo. MBA/USP
BibTeX:
@phdthesis{ono-indicadores:2014,
  author = {Ono, Mario Yoshito},
  title = {Indicadores de desempenho em Data Center},
  school = {Escola Politécnica da Universidade de São Paulo},
  year = {2014},
  url = {http://poli-integra.poli.usp.br/library/pdfs/e9dd84cde3229a14e0589d884f979772.pdf}
}
Petri I, Li H, Rezgui Y, Chunfeng Y, Yuce B and Jayan B (2014), "A modular optimisation model for reducing energy consumption in large scale building facilities", Renewable and Sustainable Energy Reviews. Vol. 38, pp. 990-1002.
Abstract: With the pressing regulatory requirement to increase energy efficiency in our built environment, significant researching efforts have been recently directed towards energy optimisation with the overall objective of reducing energy consumption. Energy simulation and optimisation identify a class of applications that demand high performance processing power in order to be realised within a feasible time-frame. The problem becomes increasingly complex when undertaking such energy simulation and optimisation in large scale buildings such as sport facilities where the generation of optimal set points can be timing inefficient. In this paper we present how a modular based optimisation system can be efficiently used for running energy simulation and optimisation in order to fulfil a number of energy related objectives. The solution can address the variability in building dynamics and provide support for building managers in implementing energy efficient optimisation plans. We present the optimisation system that has been implemented based on energy saving specifications from EU FP7 project – SportE2 (Energy Efficiency for Sport Facilities) and evaluate the efficiency of the system over a number of relevant use-case scenarios.
BibTeX:
@article{petri-modular:2014,
  author = {Petri, Ioan and Li, Haijiang and Rezgui, Yacine and Chunfeng, Yang and Yuce, Baris and Jayan, Bejay},
  title = {A modular optimisation model for reducing energy consumption in large scale building facilities},
  journal = {Renewable and Sustainable Energy Reviews},
  year = {2014},
  volume = {38},
  pages = {990--1002},
  url = {http://www.sciencedirect.com/science/article/pii/S1364032114004961},
  doi = {10.1016/j.rser.2014.07.044}
}
Rohani H and Roosta AK (2014), "Calculating Total System Availability", Delaat Report, available at http://www.delaat.net/rp/2013-2014/p17/report.pdf.
BibTeX:
@article{rohani-calculating:2014,
  author = {Rohani, Hoda and Roosta, Azad Kamali},
  title = {Calculating Total System Availability},
  journal = {Delaat Report, available at http://www.delaat.net/rp/2013-2014/p17/report.pdf},
  year = {2014}
}
Rubenstein B and Faist M (2014), "DC Cold Aisle Set Point Optimization Through Total Operating Cost Modeling", In IEEE ITHERM. , pp. 1111.
Abstract: Operational expenses of servers and the data center that supports them account for a significant part of the total cost of ownership of a data center and potentially of the total business. Manipulating the inlet temperature to the servers can provide a possible method of lowering the operating cost of a data center. Conventional wisdom regarding management of the cold aisle temperature in the data center has traditionally been that colder temperatures kept the failure rate of the equipment down and higher temperatures reduced energy costs. While it is true that in most cooling strategies, increasing the cold aisle set point temperature reduces the power consumed by the facilities, the overall cost savings may not be realized due to increases in server power and decreased reliability. An investigation using the total operational expense of the data center would be required to find the optimum operating point of the environmental controls. The simulation described in this paper demonstrates that when accounting for total operational cost including component reliability and servicing in addition to energy and water resource costs, there is an optimal data center cold aisle set point. This point will vary with resource rates and climate variations. The run cost effectiveness metric, RCE, allows data center designers and managers to monitor and optimize their data center for the given static costs of the region. It is a measurable value for many data centers that can break out these costs either in real time or, more likely, in tabulated form collected from monitoring data over time.
BibTeX:
@inproceedings{rubenstein-data:2014,
  author = {Rubenstein, B. and Faist, M.},
  title = {DC Cold Aisle Set Point Optimization Through Total Operating Cost Modeling},
  booktitle = {IEEE ITHERM},
  year = {2014},
  pages = {1111},
  doi = {10.1109/ITHERM.2014.6892405}
}
Santos SCD, Furtado F and Lins W (2014), "xPBL: A Methodology for Managing PBL When Teaching Computing", In 2014 IEEE Frontiers in Education Conference (FIE) Proceedings. , pp. 1-8. IEEE.
Abstract: In order to exploit the benefits of PBL and mitigate the risk of failure when implementing it, the NEXT (Innovative Educational Experience in Technology) research group has been working on methods and tools focused on managing the PBL approach as applied to computing. In this context, this article proposes a teaching and learning methodology based on PBL, called xPBL, consisting of elements that reinforce PBL principles, namely: real and relevant problems; a practical environment; an innovative and flexible curriculum; an authentic assessment process; close monitoring by technical tutors and process tutors; and, finally, professional practitioners as teachers and tutors. Based on these elements, the paper describes the design of a PBL approach for a design course, grounded on acquired knowledge of design content and past PBL experiences in software engineering courses. This approach provides an insightful guide to implementing PBL with the xPBL methodology, and provides instruments based on management techniques such as 5W2H (what, why, who, when, where, how and how much) and the production of artifacts to support the conception process of courses based on PBL.
BibTeX:
@inproceedings{santos-xpbl::2014,
  author = {Santos, S. C. Dos and Furtado, F. and Lins, W.},
  title = {xPBL: A Methodology for Managing PBL When Teaching Computing},
  booktitle = {2014 IEEE Frontiers in Education Conference (FIE) Proceedings},
  publisher = {IEEE},
  year = {2014},
  pages = {1--8},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7044178},
  doi = {10.1109/FIE.2014.7044178}
}
Song W, Xiao Z, Chen Q and Luo H (2014), "Adaptive Resource Provisioning for the Cloud Using Online Bin Packing", IEEE Trans. Comput.. Vol. 63(11), pp. 2647-2660.
Abstract: Data center applications present significant opportunities for multiplexing server resources. Virtualization technology makes it easy to move running applications across physical machines. In this paper, we present an approach that uses virtualization technology to allocate data center resources dynamically based on application demands and support green computing by optimizing the number of servers actively used. We abstract this as a variant of the relaxed on-line bin packing problem and develop a practical, efficient algorithm that works well in a real system. We adjust the resources available to each VM both within and across physical servers. Extensive simulation and experiment results demonstrate that our system achieves good performance compared to the existing work.
BibTeX:
@article{song-adaptive:2014,
  author = {Song, Weijia and Xiao, Zhen and Chen, Qi and Luo, Haipeng},
  title = {Adaptive Resource Provisioning for the Cloud Using Online Bin Packing},
  journal = {IEEE Trans. Comput.},
  year = {2014},
  volume = {63},
  number = {11},
  pages = {2647--2660},
  url = {http://ieeexplore.ieee.org/document/6565979/},
  doi = {10.1109/TC.2013.148}
}
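
The relaxed online bin-packing view used in the entry above can be illustrated with a minimal sketch, assuming servers are bins of normalized capacity 1.0 and each VM demand is an item placed by a simple first-fit rule; the names and numbers are illustrative, and the paper's actual algorithm (which also resizes VMs and drives green-computing decisions) is not reproduced here.

# Minimal first-fit sketch of VM-to-server packing (illustrative only;
# the paper uses a practical variant of relaxed online bin packing).
def first_fit(demands, capacity=1.0):
    """Place normalized VM demands onto servers; return per-server loads."""
    servers = []                      # current load of each active server
    for d in demands:
        for i, load in enumerate(servers):
            if load + d <= capacity:  # fits on an already-active server
                servers[i] = load + d
                break
        else:
            servers.append(d)         # open a new server (a new bin)
    return servers

print(first_fit([0.4, 0.3, 0.6, 0.2, 0.5, 0.1]))   # e.g. 3 active servers
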
Sun H, Stolf P, Pierson J-M and Da Costa G (2014), "Energy-Efficient and Thermal-Aware Resource Management for Heterogeneous Datacenters", Sustainable Computing: Informatics and Systems. Vol. 4(4), pp. 292-306.
Abstract: We propose in this paper to study energy-, thermal- and performance-aware resource management in heterogeneous datacenters. Witnessing the continuous development of heterogeneity in datacenters, we are confronted with their different behaviors in terms of performance, power consumption and thermal dissipation: indeed, heterogeneity at server level lies both in the computing infrastructure (computing power, electrical power consumption) and in the heat removal systems (different enclosures, fans, thermal sinks). Also the physical locations of the servers become important with heterogeneity, since some servers can (over)heat others. While many studies address these parameters independently (most of the time performance and power or energy), we show in this paper the necessity to tackle all these aspects for an optimal resource management of the computing resources. This leads to improved energy usage in a heterogeneous datacenter including the cooling of the computer rooms. We build our approach on the concept of a heat distribution matrix to handle the mutual influence of the servers in heterogeneous environments, which is novel in this context. We propose a heuristic to solve the server placement problem and we design a generic greedy framework for the online scheduling problem. We derive several single-objective heuristics (for performance, energy, cooling) and a novel fuzzy-based priority mechanism to handle their tradeoffs. Finally, we show results using extensive simulations fed with actual measurements on heterogeneous servers.
BibTeX:
@article{sun-energy-efficient:2014,
  author = {Sun, Hongyang and Stolf, Patricia and Pierson, Jean-Marc and Da Costa, Georges},
  title = {Energy-Efficient and Thermal-Aware Resource Management for Heterogeneous Datacenters},
  journal = {Sustainable Computing: Informatics and Systems},
  year = {2014},
  volume = {4},
  number = {4},
  pages = {292--306},
  url = {http://arxiv.org/abs/1410.3104},
  doi = {10.1016/j.suscom.2014.08.005}
}
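
A common way to formalize the "heat distribution matrix" idea mentioned in the abstract above is a heat-recirculation model in which each server's inlet temperature is the cooling supply temperature plus a weighted sum of the power drawn by all servers; the weights form the matrix. The sketch below uses that generic model with invented numbers, so the matrix entries and server powers are not taken from the paper.

import numpy as np

# Hedged sketch of a heat-distribution (heat-recirculation) model:
# T_inlet = T_supply + D @ P, where D[i][j] captures how much of server j's
# dissipated power raises server i's inlet temperature. Values are invented.
D = np.array([[0.02, 0.01, 0.00],
              [0.01, 0.03, 0.01],
              [0.00, 0.01, 0.02]])   # degrees C per watt (illustrative)
P = np.array([200.0, 150.0, 300.0])  # server power draw in watts
T_supply = 18.0                      # cooling supply temperature in degrees C

T_inlet = T_supply + D @ P
print(T_inlet)                       # inlet temperature seen by each server
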
Sun M and Chen F (2014), "Research on availability of virtual machine hot standby based on Xen", In International Conference on Software Intelligence Technologies and Applications International Conference on Frontiers of Internet of Things 2014. , pp. 330-335.
Abstract: As an important measure to ensure the high availability of campus virtual data centers, virtual machine hot standby has received comprehensive attention. To address the low availability caused by the intrinsic memory copying technology, this paper proposes pre-transfer based on a memory selection algorithm and stop-transfer based on a memory compression algorithm. A part of the memory pages are pre-transferred while the primary virtual machine is running, to reduce the number of memory pages transferred during downtime. Meanwhile, on the basis of program locality, the dirty page rate is predicted and dirty pages whose rate is low are pre-transferred to refrain from duplicate transmission at the stop-transfer stage. For stop-transfer, an improved memory compression algorithm is used to compress the memory pages with the purpose of enhancing the efficiency of stop transmission. The simulation result shows that the downtime of the improved Remus system decreases obviously compared with the original Remus in Xen, especially in the situation of a high dirty page rate and a long time interval.
BibTeX:
@inproceedings{sun-research:2014,
  author = {Sun, Mingsong and Chen, Fang},
  title = {Research on availability of virtual machine hot standby based on Xen},
  booktitle = {International Conference on Software Intelligence Technologies and Applications International Conference on Frontiers of Internet of Things 2014},
  year = {2014},
  pages = {330--335},
  doi = {10.1049/cp.2014.1584}
}
Vigliotti APMDLF and Batista DM (2014), "A Green Network-Aware VMs Placement Mechanism", In 2014 IEEE Global Communications Conference. , pp. 2530-2535.
Abstract: Data centers' power consumption corresponds to nearly 2% of the total worldwide power consumption, with constantly increasing greenhouse effect and CO2 footprints. Virtualization techniques improve the efficiency of data center infrastructure by sharing the same physical hardware among several virtual machines (VMs). An efficient VM placement can minimize even further the hardware and energy needs. In contrast to existing VM placement algorithms that usually focus on a single resource or assume that resource demands are deterministic, this paper proposes and compares four energy-aware algorithms that consider multiple stochastic resources, including network bandwidth. We first formulate the problem as a multi-objective optimization problem with stochastic resources and present two algorithms based on this approach. We also formulate the problem as an evolutionary computation problem and present two algorithms based on this approach. The objective is a joint strategy: minimize the required hardware to maximize the allocated VMs satisfying the resource requirements. Through simulations, we compare our algorithms using real VM workloads from the PlanetLab project and show significant improvements in power consumption and network utilization. On average, the algorithms reduce power consumption by 87.90% and network utilization by 9.94%.
BibTeX:
@inproceedings{vigliotti-green:2014,
  author = {Vigliotti, A. P. M. de la Fuente and Batista, D. M.},
  title = {A Green Network-Aware VMs Placement Mechanism},
  booktitle = {2014 IEEE Global Communications Conference},
  year = {2014},
  pages = {2530--2535},
  doi = {10.1109/GLOCOM.2014.7037188}
}
Webarcondicionado (2014), "A Relação Entre Ar Condicionado E Data Centers".
Abstract: You are only reading this text thanks to a climate-control system. The link between air conditioning and computers is indirect, but decisive. The data processing center (CPD), known in English as a data center, is the …
BibTeX:
@misc{webarcondicionado-relacao:2014,
  author = {Webarcondicionado},
  title = {A Relação Entre Ar Condicionado E Data Centers},
  journal = {Portal Web Ar Condicionado},
  year = {2014},
  url = {http://www.webarcondicionado.com.br/a-relacao-entre-ar-condicionado-e-data-centers}
}
Wiboonrat M (2014), "Life Cycle Cost Analysis of Data Center Project", In 2014 Ninth International Conference on Ecological Vehicles and Renewable Energies (EVER). , pp. 1-6.
Abstract: This work reexamines the feasibility and cost impact of sustainable data center design in the light of increased market adoption. Life cycle cost analysis (LCCA) of data centers is a systems approach for appraising and predicting the total cost of facility ownership. It must take into account all costs of assessing feasibility, estimating, acquiring, operating, maintaining, and disposing of the data center ecosystem. This research classified the LCCA of a data center into four phases: planning, implementing, operating, and transforming. The qualitative research was conducted through investigative triangulation, interviews, focus groups and case studies. Data center consultants, system integrators, and project owners were the focus groups for in-depth interviews. The research findings propose the data center cost capsule (DCCC) model, which identifies all associated costs of the entire data center life cycle: before, during, and after operations. The DCCC model is designed to comply with international standards, e.g., ISO 9001:2008 and TIA-942. Moreover, the DCCC model foresees the total cost of ownership of the data center.
BibTeX:
@inproceedings{wiboonrat-life:2014,
  author = {Wiboonrat, M.},
  title = {Life Cycle Cost Analysis of Data Center Project},
  booktitle = {2014 Ninth International Conference on Ecological Vehicles and Renewable Energies (EVER)},
  year = {2014},
  pages = {1--6},
  doi = {10.1109/EVER.2014.6844139}
}
Ye H, Song Z and Sun Q (2014), "Design of Green Data Center Deployment Model Based on Cloud Computing and TIA-942 Heat Dissipation Standard", In 2014 IEEE IWECA. , pp. 433-437.
Abstract: The data center is a field with high investment of funds, manpower and operations in IT components. Today, green data center construction, with its core idea of energy conservation and emission reduction, is the main direction of the IT industry. A new-generation unified switch architecture can provide users with a simple and green data center. Combined with the development status of current data centers, this paper discusses the key techniques and methods of TIA-942-based green data center energy conservation and emission reduction and the cloud computing data center. It also combines cloud computing and the green data center based on the TIA-942 heat dissipation standard to propose a fusion model and complete the application deployment of the green data center fusion model.
BibTeX:
@inproceedings{ye-design:2014,
  author = {Ye, Hanmin And Song, Zihang And Sun, Qianting},
  title = {Design of Green Data Center Deployment Model Based on Cloud Computing and TIA-942 Heat Dissipation Standard},
  booktitle = {2014 IEEE IWECA},
  year = {2014},
  pages = {433--437},
  doi = {10.1109/IWECA.2014.6845649}
}
Zhang Q, Metri G, Raghavan S and Shi W (2014), "RESCUE: An Energy-Aware Scheduler for Cloud Environments", Sustainable Computing: Informatics and Systems. Vol. 4(4), pp. 215-224.
BibTeX:
@article{zhang-rescue:2014,
  author = {Zhang, Quan And Metri, Grace And Raghavan, Sudharsan And Shi, Weisong},
  title = {RESCUE: An Energy-Aware Scheduler for Cloud Environments},
  journal = {Sustainable Computing: Informatics and Systems},
  year = {2014},
  volume = {4},
  number = {4},
  pages = {215--224},
  url = {http://linkinghub.elsevier.com/retrieve/pii/S2210537914000493},
  doi = {10.1016/j.suscom.2014.08.008}
}
Liu Q, Moreto M, Jimenez V, Abella J, Cazorla FJ and Valero M (2013), "Hardware Support for Accurate Per-Task Energy Metering in Multicore Systems", ACM Transactions on Architecture and Code Optimization., December, 2013. Vol. 10(4), pp. 34:1-34:27.
Abstract: Accurately determining the energy consumed by each task in a system will become of prominent importance in future multicore-based systems because it offers several benefits, including (i) better application energy/performance optimizations, (ii) improved energy-aware task scheduling, and (iii) energy-aware billing in data centers. Unfortunately, existing methods for energy metering in multicores fail to provide accurate energy estimates for each task when several tasks run simultaneously. This article makes a case for accurate per-task energy metering (PTEM) based on tracking the resource utilization and occupancy of each task. Different hardware implementations with different trade-offs between energy prediction accuracy and hardware-implementation complexity are proposed. Our evaluation shows that the energy consumed in a multicore by each task can be accurately measured. For a 32-core, 2-way, simultaneous multithreaded core setup, PTEM reduces the average accuracy error from more than 12% when our hardware support is not used to less than 4% when it is used. The maximum observed error for any task in the workload we used reduces from 58% down to 9% when our hardware support is used.
BibTeX:
@article{liu-hardware:2013,
  author = {Liu, Qixiao and Moreto, Miquel and Jimenez, Victor and Abella, Jaume and Cazorla, Francisco J. and Valero, Mateo},
  title = {Hardware Support for Accurate Per-Task Energy Metering in Multicore Systems},
  journal = {ACM Transactions on Architecture and Code Optimization},
  year = {2013},
  volume = {10},
  number = {4},
  pages = {34:1--34:27},
  doi = {10.1145/2541228.2555291}
}
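
The idea of attributing energy by tracking per-task resource utilization and occupancy, as described in the abstract above, can be sketched generically: dynamic energy is split in proportion to each task's activity on a resource, while static energy is split in proportion to its occupancy. This is only an illustrative accounting, not the hardware mechanism proposed in the paper; all names and numbers are invented.

# Illustrative per-task energy attribution (not the paper's hardware design):
# dynamic energy split by each task's share of resource activity,
# static energy split by each task's share of occupancy.
def per_task_energy(activity, occupancy, e_dynamic, e_static):
    total_act = sum(activity.values()) or 1.0
    total_occ = sum(occupancy.values()) or 1.0
    return {task: e_dynamic * activity[task] / total_act
                  + e_static * occupancy.get(task, 0.0) / total_occ
            for task in activity}

energy = per_task_energy(
    activity={"taskA": 120.0, "taskB": 80.0},   # e.g. active cycles or accesses
    occupancy={"taskA": 0.6, "taskB": 0.4},     # e.g. cache share * time
    e_dynamic=50.0, e_static=20.0)              # joules over the interval
print(energy)                                   # {'taskA': 42.0, 'taskB': 28.0}
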
Nunez D, Fernandez-Gago C, Pearson S and Felici M (2013), "A Metamodel for Measuring Accountability Attributes in the Cloud", In 2013 IEEE 5th International Conference on Cloud Computing Technology and Science (CloudCom)., December, 2013. Vol. 1, pp. 355-362.
Abstract: Cloud governance, and in particular data governance in the cloud, relies on different technical and organizational practices and procedures, such as policy enforcement, risk management, incident management and remediation. The concept of accountability encompasses such practices, and is essential for enhancing security and trustworthiness in the cloud. Besides this, proper measurement of cloud services, both at a technical and governance level, is a distinctive aspect of the cloud computing model. Hence, a natural problem that arises is how to measure the impact on accountability of the procedures held in practice by organizations that participate in the cloud ecosystem. In this paper, we describe a metamodel for addressing the problem of measuring accountability properties for cloud computing, as discussed and defined by the Cloud Accountability Project (A4Cloud). The goal of this metamodel is to act as a language for describing: (i) accountability properties in terms of actions between entities, and (ii) metrics for measuring the fulfillment of such properties. It also allows the recursive decomposition of properties and metrics, from a high-level and abstract world to a tangible and measurable one. Finally, we illustrate our proposal of the metamodel by modelling the transparency property, and define some metrics for it.
BibTeX:
@inproceedings{nunez-metamodel:2013,
  author = {Nunez, D. and Fernandez-Gago, C. and Pearson, S. and Felici, M.},
  title = {A Metamodel for Measuring Accountability Attributes in the Cloud},
  booktitle = {2013 IEEE 5th International Conference on Cloud Computing Technology and Science (CloudCom)},
  year = {2013},
  volume = {1},
  pages = {355--362},
  doi = {10.1109/CloudCom.2013.53}
}
Romer R (2013), "Tier: Como é Feita A Classificação E Quais As Diferenças Entre Data Centers?". December, 2013.
Abstract: Like most services offered in the market, data centers also have their own classification, which indicates how prepared they are to deal with problems and how solid their infrastructures are.
BibTeX:
@misc{romer-tier:2013,
  author = {Romer, Rafael},
  title = {Tier: Como é Feita A Classificação E Quais As Diferenças Entre Data Centers?},
  year = {2013}
}
Jin X, Wang H, Wang J, Cheng S and Li J (2013), "A Partners Assisted Virtual Machine Live Storage Migration for Intensive Disk I/O Workloads", In 2013 IEEE 10th International Conference on High Performance Computing and Communications 2013 IEEE International Conference on Embedded and Ubiquitous Computing., November, 2013. , pp. 1693-1698.
Abstract: Live migration of virtual machine (VM) enables mobility of VM and contributes to advantages of virtualization like energy saving, high availability, fault tolerance and work load balancing. However solutions of VMs' migration in both theoretical and industrial areas concentrate more on memory migration other than storage migration. Lots of applications with intensive disk I/O operations rely on local storage, especially when it comes to high performance computing. Migration of shared storage is also of necessity for consolidation and workload balance. Current approaches on storage migration can hardly work effectively in disk I/O intensive environment. They cannot reduce migration time and guarantee the disk I/O performance of VMs at the same time. This paper proposes an approach called Partners Assisted Storage Migration (PASM). We are the first to utilize disk I/O ability of pre-allocated storage nodes to relieve the competition between VMs' intensive disk I/O and storage migration. It can migrate VMs' storage effectively comparing to current methods: post-copy and write-mirror. Experiments including single VM's migration and multiple VMs' migration show that PASM can save 78.9% migration time and achieve additional 27.1% in disk I/O performance over existing methods.
BibTeX:
@inproceedings{jin-partners:2013,
  author = {Jin, X. and Wang, H. and Wang, J. and Cheng, S. and Li, J.},
  title = {A Partners Assisted Virtual Machine Live Storage Migration for Intensive Disk I/O Workloads},
  booktitle = {2013 IEEE 10th International Conference on High Performance Computing and Communications 2013 IEEE International Conference on Embedded and Ubiquitous Computing},
  year = {2013},
  pages = {1693--1698},
  doi = {10.1109/HPCC.and.EUC.2013.240}
}
Righi RDR (2013), "Elasticidade em cloud computing: conceito, estado da arte e novos desafios", Revista Brasileira de Computação Aplicada., November, 2013. Vol. 5(2)
BibTeX:
@article{righi-elasticidade:2013,
  author = {Righi, Rodrigo Da Rosa},
  title = {Elasticidade em cloud computing: conceito, estado da arte e novos desafios},
  journal = {Revista Brasileira de Computação Aplicada},
  year = {2013},
  volume = {5},
  number = {2},
  url = {http://www.upf.br/seer/index.php/rbca/article/view/3084},
  doi = {10.5335/rbca.2013.3084}
}
Nelson J, Santala T, Lenchner J, Calio R, Frissora M and Miller J (2013), "Locating and Tracking Data Center Assets Using Active RFID Tags and a Mobile Robot", In 2013 10th International Conference and Expo on Emerging Technologies for a Smarter World (CEWIT)., October, 2013. , pp. 1-6.
Abstract: We describe an approach to completely automated asset tracking in data centers using a vision-based mobile robot in conjunction with active RFID tags. Typically, active RFID tags are tracked using fixed readers. The granularity with which one can localize tags is based on the number of these fixed readers, each of which is not inexpensive. In large data centers, the cost of such a solution can therefore become excessive. We describe the use of a mobile robot, utilizing a single RFID reader and a variety of algorithms for locating the plethora of RFID tags one can encounter in such data centers. These approaches are validated through experiments performed in a production industrial data center.
BibTeX:
@inproceedings{nelson-locating:2013,
  author = {Nelson, J. C. and Santala, T. and Lenchner, J. and Calio, R. and Frissora, M. and Miller, J. E.},
  title = {Locating and Tracking Data Center Assets Using Active RFID Tags and a Mobile Robot},
  booktitle = {2013 10th International Conference and Expo on Emerging Technologies for a Smarter World (CEWIT)},
  year = {2013},
  pages = {1--6},
  doi = {10.1109/CEWIT.2013.6713757}
}
Xu Y, Deng Y and Du L (2013), "Calculating the Power Usage Effectiveness of Data Centers by Using Weighted Average Workload", In 2013 8th 3PGCIC., October, 2013.
Abstract: The traditional approaches to calculating PUE (Power Usage Effectiveness) in data centers ignore the impact of the time factor. These methods cannot accurately reflect the long-term operation efficiency of data centers. This paper investigates the impact of workload on the PUE, and proposes to compute the PUE by leveraging weighted average workload. Theoretical analysis and experimental evaluation demonstrate that workload normally has a significant impact on the average PUE. The proposed approach can achieve a more accurate PUE in contrast to the traditional methods.
BibTeX:
@inproceedings{xu-calculating:2013,
  author = {Xu, Yongmei And Deng, Yuhui And Du, Lan},
  title = {Calculating the Power Usage Effectiveness of Data Centers by Using Weighted Average Workload},
  booktitle = {2013 8th 3PGCIC},
  year = {2013},
  doi = {10.1109/3PGCIC.2013.54}
}
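
One plausible reading of the weighted-average idea in the abstract above is: instead of averaging per-interval PUE values equally, weight each interval's PUE by the workload observed in that interval. The snippet below sketches that reading with invented numbers; the exact weighting used in the paper may differ.

# Illustrative workload-weighted average PUE (one possible interpretation).
# PUE_i = facility_power_i / it_power_i for interval i; the average is
# weighted by the workload w_i observed in that interval.
def weighted_average_pue(facility_power, it_power, workload):
    pue = [f / i for f, i in zip(facility_power, it_power)]
    return sum(w * p for w, p in zip(workload, pue)) / sum(workload)

facility = [180.0, 150.0, 300.0]   # kW per interval (invented)
it_power = [100.0, 80.0, 200.0]    # kW per interval (invented)
workload = [0.5, 0.2, 0.9]         # normalized utilization per interval

print(weighted_average_pue(facility, it_power, workload))
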
IBM Knowledge Center (2013), "Critérios De Design Ambiental Para Instalação Do Servidor Power7". September, 2013.
BibTeX:
@misc{ibm-criterios:2013,
  author = {IBM Knowledge Center},
  title = {Critérios De Design Ambiental Para Instalação Do Servidor Power7},
  year = {2013},
  url = {http://www-01.ibm.com/support/knowledgecenter/api/content/nl/pt-br/POWER7/p7ebe/p7ebetempandhumiditydesign.htm}
}
Stroyan J and Brown N (2013), "Study on the contribution of standardization to innovation in European-funded research projects". Thesis at: technopolis Group. UK, September, 2013. (Final), pp. 122.
BibTeX:
@techreport{stroyan-study:2013,
  author = {Stroyan, James and Brown, Neil},
  title = {Study on the contribution of standardization to innovation in European-funded research projects},
  school = {technopolis Group},
  year = {2013},
  number = {Final},
  pages = {122},
  url = {https://www.cencenelec.eu/standards/Education/JointWorkingGroup/Documents/Study-Contribution-Standardization-Innovation-Final2013.pdf}
}
Volk E, Tenschert A, Gienger M, Oleksiak A, Siso L and Salom J (2013), "Improving Energy Efficiency in Data Centers and Federated Cloud Environments: Approaches and Metrics", In 2013 3rd CGC., September, 2013. , pp. 443-450.
Abstract: Significant data center energy footprints and the increase in energy prices have stimulated investigations into possible metrics and methods to define, quantify and improve the energy efficiency of data centers and federated cloud environments. Studies include metrics and analyses from various points of view that address both design and operation phases. In this paper we present two complementary energy-efficiency optimization approaches covered in the scope of EU projects: CoolEmAll, with a focus on building energy-efficient data centers, and ECO2Clouds, with a focus on energy-efficient cloud-application deployment in federated cloud environments, and we describe the metrics applied in these projects to assess and optimize energy efficiency. Both approaches make use of metrics to assess the energy efficiency of data center and cloud resources, and the energy costs of application/workload execution, for various data center granularity levels and federation sites.
BibTeX:
@inproceedings{volk-improving:2013,
  author = {Volk, E. and Tenschert, A. and Gienger, M. and Oleksiak, A. and Siso, L. and Salom, J.},
  title = {Improving Energy Efficiency in Data Centers and Federated Cloud Environments: Approaches and Metrics},
  booktitle = {2013 3rd CGC},
  year = {2013},
  pages = {443--450},
  doi = {10.1109/CGC.2013.76}
}
Bubon RJ (2013), "Envmon". July, 2013.
BibTeX:
@misc{envmon:2013,
  author = {Bubon, Robert J.},
  title = {Envmon},
  year = {2013},
  url = {http://www.bigi.com/wiki/Envmon}
}
Liu H, Jin H, Xu C-Z and Liao X (2013), "Performance and energy modeling for live migration of virtual machines", Cluster Computing., June, 2013. Vol. 16(2), pp. 249-264.
Abstract: Live migration of virtual machine (VM) provides a significant benefit for virtual server mobility without disrupting service. It is widely used for system management in virtualized data centers. However, migration costs may vary significantly for different workloads due to the variety of VM configurations and workload characteristics. To take into account the migration overhead in migration decision-making, we investigate design methodologies to quantitatively predict the migration performance and energy consumption. We thoroughly analyze the key parameters that affect the migration cost from theory to practice. We construct application-oblivious models for the cost prediction by using learned knowledge about the workloads at the hypervisor (also called VMM) level. This should be the first kind of work to estimate VM live migration cost in terms of both performance and energy in a quantitative approach. We evaluate the models using five representative workloads on a Xen virtualized environment. Experimental results show that the refined model yields higher than 90% prediction accuracy in comparison with measured cost. Model-guided decisions can significantly reduce the migration cost by more than 72.9% at an energy saving of 73.6%.
BibTeX:
@article{Liu2013,
  author = {Liu, Haikun and Jin, Hai and Xu, Cheng-Zhong and Liao, Xiaofei},
  title = {Performance and energy modeling for live migration of virtual machines},
  journal = {Cluster Computing},
  year = {2013},
  volume = {16},
  number = {2},
  pages = {249--264},
  url = {https://link.springer.com/article/10.1007/s10586-011-0194-3},
  doi = {10.1007/s10586-011-0194-3}
}
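
The entry above builds application-oblivious cost models from parameters such as VM memory size, page dirty rate and link bandwidth. A standard pre-copy estimate, widely used in the live-migration literature and not necessarily the paper's exact refined model, is sketched below: each round retransmits the pages dirtied while the previous round was being sent, so the transferred volume shrinks roughly geometrically when the dirty rate is below the bandwidth. All numbers are invented.

# Generic pre-copy migration cost estimate (a textbook model, not the
# paper's exact refined model): each round resends pages dirtied while
# the previous round was being transferred.
def precopy_migration(vmem_mb, dirty_rate, bandwidth, max_rounds=30,
                      stop_threshold_mb=50.0):
    """Return (total data sent in MB, estimated migration time in s)."""
    to_send = vmem_mb                       # first round copies all memory
    total, time = 0.0, 0.0
    for _ in range(max_rounds):
        total += to_send
        round_time = to_send / bandwidth
        time += round_time
        to_send = dirty_rate * round_time   # pages dirtied during this round
        if to_send <= stop_threshold_mb:
            break
    total += to_send                        # final stop-and-copy round
    time += to_send / bandwidth
    return total, time

print(precopy_migration(vmem_mb=4096, dirty_rate=20.0, bandwidth=100.0))
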
Patterson MK, Poole SW, Hsu C-H, Maxwell D, Tschudi W, Coles H, Martinez DJ and Bates N (2013), "TUE, a New Energy-Efficiency Metric Applied at ORNL's Jaguar", In Supercomputing., June, 2013. (7905), pp. 372-382. Springer Berlin Heidelberg.
Abstract: The metric power usage effectiveness (PUE) has been successful in improving the energy efficiency of data centers, but it is not perfect. One challenge is that PUE does not account for the power distribution and cooling losses inside IT equipment. This is particularly problematic in the HPC (high performance computing) space, where system suppliers are moving cooling and power subsystems into or out of the cluster. This paper proposes two new metrics: ITUE (IT-power usage effectiveness), similar to PUE but "inside" the IT, and TUE (total-power usage effectiveness), which combines the two for a total efficiency picture. We conclude with a demonstration of the method, and a case study of measurements at ORNL's Jaguar system. TUE provides a ratio of total energy (internal and external support energy uses) and the specific energy used in the HPC. TUE can also be a means for comparing HPC site to HPC site.
BibTeX:
@incollection{patterson-tue:2013,
  author = {Patterson, Michael K. and Poole, Stephen W. and Hsu, Chung-Hsing and Maxwell, Don and Tschudi, William and Coles, Henry and Martinez, David J. and Bates, Natalie},
  editor = {Kunkel, Julian Martin and Ludwig, Thomas and Meuer, Hans Werner},
  title = {TUE, a New Energy-Efficiency Metric Applied at ORNL's Jaguar},
  booktitle = {Supercomputing},
  publisher = {Springer Berlin Heidelberg},
  year = {2013},
  number = {7905},
  pages = {372--382},
  doi = {10.1007/978-3-642-38750-0_28}
}
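
The relationship described in the abstract above can be written out numerically: ITUE divides total energy entering the IT equipment by the energy that reaches the actual compute components (after internal fans, power supplies and voltage regulators), PUE divides total facility energy by IT energy, and TUE is their product, i.e. total facility energy per unit of compute energy. The values below are invented, not ORNL measurements.

# Numeric sketch of ITUE, PUE and TUE (values are invented, not ORNL data).
facility_energy = 1500.0   # kWh: everything the data center draws
it_energy       = 1000.0   # kWh: energy delivered to IT equipment
compute_energy  =  800.0   # kWh: energy reaching the compute components after
                           # internal fans, PSUs and voltage regulators

pue  = facility_energy / it_energy   # classic facility-level metric
itue = it_energy / compute_energy    # "PUE inside" the IT equipment
tue  = itue * pue                    # == facility_energy / compute_energy

print(pue, itue, tue)                # 1.5 1.25 1.875
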
Chen H, Kang H, Jiang G and Zhang Y (2013), "Network-aware coordination of virtual machine migrations in enterprise data centers and clouds", In 2013 IFIP/IEEE International Symposium on Integrated Network Management (IM 2013)., May, 2013. , pp. 888-891.
Abstract: Virtual machine(VM) migration usually requires a considerable amount of system resources such as the network bandwidth. In the case of multiple simultaneous migrations, such resource demands will increase dramatically and are difficult to be satisfied immediately. This paper proposes a scheduling method for multiple VM migrations to guarantee the fast completion of those tasks and hence the reduced impacts on system performance. We discover the best bandwidth sharing policy for each network link, and further propose a bin-packing algorithm to organize bandwidth resources from all the network links. As a result, the migration tasks can fully utilize available resources in the whole network to achieve the fast completion.
BibTeX:
@inproceedings{chen-network-aware:2013,
  author = {Chen, H. and Kang, H. and Jiang, G. and Zhang, Y.},
  title = {Network-aware coordination of virtual machine migrations in enterprise data centers and clouds},
  booktitle = {2013 IFIP/IEEE International Symposium on Integrated Network Management (IM 2013)},
  year = {2013},
  pages = {888--891}
}
Schwartz T, Stevens G, Ramirez L and Wulf V (2013), "Uncovering Practices of Making Energy Consumption Accountable: A Phenomenological Inquiry", ACM Transactions on Computer-Human Interaction., May, 2013. Vol. 20(2), pp. 12:1-12:30.
Abstract: Reacting to the discussion on global warming, the HCI community has started to explore the design of tools to support responsible energy consumption. An important part of this research focuses on motivating energy savings by providing feedback tools which present consumption metrics interactively. In this line of work, the configuration of feedback has been mainly discussed using cognitive or behavioral factors. This narrow focus, however, misses a highly relevant perspective for the design of technology that supports sustainable lifestyles: to investigate the multiplicity of forms in which individuals or collectives actually consume energy. In this article, we broaden this focus by taking a phenomenological lens to study how people use off-the-shelf eco-feedback systems in private households to make energy consumption accountable and explainable. By reconstructing accounting practices, we delineate several constitutive elements of the phenomenon of energy usage in daily life. We complement these elements with a description of the sophisticated methods used by people to organize their energy practices and to give a meaning to their energy consumption. We describe these elements and methods, providing examples coming from the fieldwork and uncovering observed strategies to account for consumption. Based on our results, we provide a critical perspective on existing eco-feedback mechanisms and describe several elements for a design rationale for designing support for responsible energy consumption. We argue that interactive feedback systems should not simply be an end, but rather a resource for the construction of the artful practice of making energy consumption accountable.
BibTeX:
@article{schwartz-uncovering:2013,
  author = {Schwartz, Tobias and Stevens, Gunnar and Ramirez, Leonardo and Wulf, Volker},
  title = {Uncovering Practices of Making Energy Consumption Accountable: A Phenomenological Inquiry},
  journal = {ACM Transactions on Computer-Human Interaction},
  year = {2013},
  volume = {20},
  number = {2},
  pages = {12:1--12:30},
  doi = {10.1145/2463579.2463583}
}
Simonin M, Feller E, Orgerie AC, Jégou Y and Morin C (2013), "An Autonomic and Scalable Management System for Private Clouds", In 2013 13th IEEE/ACM International Symposium on Cluster, Cloud, and Grid Computing., May, 2013. , pp. 198-199.
Abstract: Snooze is an open-source scalable, autonomic, and energy-efficient virtual machine (VM) management framework for private clouds. It allows users to build compute infrastructures from virtualized resources. Particularly, once installed and configured, it allows its users to submit and control the life-cycle of a large number of VMs. For scalability, the system relies on a self-organizing hierarchical architecture. Moreover, it implements self-healing mechanisms in case of failure to enable high availability. It also performs energy-efficient distributed VM management through consolidation and power management techniques. This poster focuses on the experimental validation of two main properties of Snooze: scalability and fault-tolerance.
BibTeX:
@inproceedings{simonin-autonomic:2013,
  author = {Simonin, M. and Feller, E. and Orgerie, A. C. and Jégou, Y. and Morin, C.},
  title = {An Autonomic and Scalable Management System for Private Clouds},
  booktitle = {2013 13th IEEE/ACM International Symposium on Cluster, Cloud, and Grid Computing},
  year = {2013},
  pages = {198--199},
  doi = {10.1109/CCGrid.2013.44}
}
Trobec R, Depolli M, Skala K and Lipic T (2013), "Energy Efficiency in Large-Scale Distributed Computing Systems", In 2013 36th International Convention on Information Communication Technology Electronics Microelectronics (MIPRO)., May, 2013. , pp. 253-257.
Abstract: The ever-increasing energy consumption in large-scale distributed computing systems such as clusters, grids and clouds raises social, technical, economical, and environmental concerns. Therefore, designing novel energy-efficient approaches to reduce energy consumption at all levels of distributed system architecture is of great importance for the whole society. However, the essential step towards the introduction of energy efficiency in large-scale distributed systems is to measure the power consumption accurately, reliably, and continually in each component of the system. This paper briefly surveys the current approaches for measuring and profiling power consumption in large-scale distributed systems. Furthermore, a practical case study of real-time power measurement in a multi-core computing system, as a basic building block of a distributed computing system, is presented.
BibTeX:
@inproceedings{trobec-energy:2013,
  author = {Trobec, R. and Depolli, M. and Skala, K. and Lipic, T.},
  title = {Energy Efficiency in Large-Scale Distributed Computing Systems},
  booktitle = {2013 36th International Convention on Information Communication Technology Electronics Microelectronics (MIPRO)},
  year = {2013},
  pages = {253--257}
}
Matsumoto K, Yamagiwa M, Uehara M and Mori H (2013), "Proposal of Sensor Data Gathering with Single Board Computer", In 2013 27th International Conference on Advanced Information Networking and Applications Workshops (WAINA)., March, 2013. , pp. 162-167.
Abstract: In Japan, the generation of electricity emits 20% of the total greenhouse gases. The use of home electronics, besides consuming power, also accounts for 46% of the greenhouse gas emissions. We therefore need to reduce our power consumption at home to reduce greenhouse gas emissions, without sacrificing home comforts such as watching television and using air conditioners and various other electronic devices. We propose a sensor data gathering system using a single board computer for home energy management systems. This sensor system should be capable of handling 100 sensor data per second. Since a single board computer has low power consumption and low performance, it is used as the sensor data gathering server. Based on our benchmark evaluation, a single board computer can receive 5000 sensor data per second. Since the basic requirement is only 100 sensor data per second, this single board computer has adequate performance to act as an aggregated server for 50 such sensor data gathering systems.
BibTeX:
@inproceedings{matsumoto-proposal:2013,
  author = {Matsumoto, K. and Yamagiwa, M. and Uehara, M. and Mori, H.},
  title = {Proposal of Sensor Data Gathering with Single Board Computer},
  booktitle = {2013 27th International Conference on Advanced Information Networking and Applications Workshops (WAINA)},
  year = {2013},
  pages = {162--167},
  doi = {10.1109/WAINA.2013.102}
}
Open Source Hardware Association (2013), "Definição De Open Source Hardware (OSHW) 1.0". March, 2013.
Abstract: The preliminary draft of the OSHW Definition 1.0 is based on the Open Source Definition for open source software and on the preliminary draft of the OSHW Definition 0.5. This definition is a derivative of the Open Source Definition, which was created by Bruce Perens and the Debian developers as the Debian Free Software Guidelines.
BibTeX:
@misc{oshw-definicao:2013,
  author = {Open Source Hardware Association},
  title = {Definição De Open Source Hardware (OSHW) 1.0},
  year = {2013}
}
Rady M (2013), "Formal Definition of Service Availability in Cloud Computing Using OWL", In Computer Aided Systems Theory - EUROCAST 2013., February, 2013. , pp. 189-194. Springer, Berlin, Heidelberg.
Abstract: Fulfilling cloud customers' needs entails describing a quality of service on top of the services' functional description. Currently, the only guarantees that are offered by cloud providers are imprecise and incomplete Service Level Agreements (SLAs). We present a model to describe one of the main attributes discussed in SLAs, which is availability. The model is developed using the Web Ontology Language OWL, and it aims at covering the different concepts of availability and availability-related attributes that should be present in a service contract in order to guarantee the quality of service the consumer is expecting.
BibTeX:
@inproceedings{rady-formal:2013,
  author = {Rady, Mariam},
  title = {Formal Definition of Service Availability in Cloud Computing Using OWL},
  booktitle = {Computer Aided Systems Theory - EUROCAST 2013},
  publisher = {Springer, Berlin, Heidelberg},
  year = {2013},
  pages = {189--194},
  url = {https://link.springer.com/chapter/10.1007/978-3-642-53856-8_24},
  doi = {10.1007/978-3-642-53856-8_24}
}
Almihoub AAA, Mula JM and Rahman MM (2013), "Marginal Abatement Cost Curves (MACCs): Important Approaches to Obtain (Firm and Sector) Greenhouse Gases (GHGs) Reduction", International Journal of Economics and Finance. Vol. 5(5)
BibTeX:
@article{almihoub-marginal:2013,
  author = {Almihoub, Ali Ahmed Ali and Mula, Joseph M. and Rahman, Mohammad Mafizur},
  title = {Marginal Abatement Cost Curves (MACCs): Important Approaches to Obtain (Firm and Sector) Greenhouse Gases (GHGs) Reduction},
  journal = {International Journal of Economics and Finance},
  year = {2013},
  volume = {5},
  number = {5},
  url = {http://www.ccsenet.org/journal/index.php/ijef/article/view/26701},
  doi = {10.5539/ijef.v5n5p35}
}
Aruna M, Bhanu D and Punithagowri R (2013), "A Survey On Load Balancing Algorithms In Cloud Environment", International Journal Of Computer Applications. Vol. 82(16), pp. 39-43.
BibTeX:
@article{aruna-survey:2013,
  author = {Aruna, M. And Bhanu, D. And Punithagowri, R.},
  title = {A Survey On Load Balancing Algorithms In Cloud Environment},
  journal = {International Journal Of Computer Applications},
  year = {2013},
  volume = {82},
  number = {16},
  pages = {39--43},
  url = {http://research.ijcaonline.org/volume82/number16/pxc3892472.pdf},
  doi = {10.5120/14251-2472}
}
Beloglazov A (2013), "Energy-efficient Management Of Virtual Machines In Data Centers For Cloud Computing". Thesis at: The University Of Melbourne. Melbourne, AU
BibTeX:
@mastersthesis{beloglazov-energy-efficient:2013,
  author = {Beloglazov, Anton},
  title = {Energy-efficient Management Of Virtual Machines In Data Centers For Cloud Computing},
  school = {The University Of Melbourne},
  year = {2013},
  url = {https://minerva-access.unimelb.edu.au/handle/11343/38198}
}
Chen H, Wang F, Helian N and Akanmu G (2013), "User-Priority Guided Min-Min Scheduling Algorithm for Load Balancing in Cloud Computing", In 2013 National Conference on Parallel Computing Technologies (PARCOMPTECH). , pp. 1-8.
Abstract: Cloud computing is emerging as a new paradigm of large-scale distributed computing. In order to utilize the power of cloud computing completely, we need an efficient task scheduling algorithm. The traditional Min-Min algorithm is a simple, efficient algorithm that produces a better schedule, minimizing the total completion time of tasks, than other algorithms in the literature [7]. However, its biggest drawback is load imbalance, which is one of the central issues for cloud providers. In this paper, an improved load-balanced algorithm (LBIMM) is introduced on the ground of the Min-Min algorithm in order to reduce the makespan and increase resource utilization. At the same time, cloud providers offer computer resources to users on a pay-per-use basis. In order to accommodate the demands of different users, they may offer different levels of quality for services. Then the cost per resource unit depends on the services selected by the user. In return, the user receives guarantees regarding the provided resources. To observe the promised guarantees, user priority was considered in our proposed PA-LBIMM so that users' demands could be satisfied more completely. At last, the introduced algorithm is simulated using the MATLAB toolbox. The simulation results show that the improved algorithm can lead to significant performance gains and achieve over 20% improvement on both VIP user satisfaction and resource utilization ratio.
BibTeX:
@inproceedings{chen-user-priority:2013,
  author = {Chen, Huankai and Wang, F. and Helian, N. and Akanmu, G.},
  title = {User-Priority Guided Min-Min Scheduling Algorithm for Load Balancing in Cloud Computing},
  booktitle = {2013 National Conference on Parallel Computing Technologies (PARCOMPTECH)},
  year = {2013},
  pages = {1--8},
  doi = {10.1109/ParCompTech.2013.6621389}
}
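
The baseline Min-Min algorithm that the entry above improves on can be sketched briefly: repeatedly compute, for each unscheduled task, its minimum completion time over all resources, then schedule the task with the smallest such minimum on its best resource. The LBIMM and PA-LBIMM refinements (load rebalancing and user priority) are not reproduced here, and the example data is invented.

# Baseline Min-Min scheduling sketch (the paper's LBIMM/PA-LBIMM
# refinements are not implemented here).
def min_min(exec_time, n_resources):
    """exec_time[task][resource] -> (assignment {task: resource}, makespan)."""
    ready = {r: 0.0 for r in range(n_resources)}     # resource ready times
    unscheduled = set(exec_time)
    schedule = {}
    while unscheduled:
        # completion time of each unscheduled task on its best resource
        best = {t: min((ready[r] + exec_time[t][r], r) for r in ready)
                for t in unscheduled}
        task, (finish, res) = min(best.items(), key=lambda kv: kv[1][0])
        schedule[task] = res
        ready[res] = finish
        unscheduled.remove(task)
    return schedule, max(ready.values())

times = {"t1": [3.0, 5.0], "t2": [4.0, 2.0], "t3": [6.0, 6.0]}
print(min_min(times, n_resources=2))
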
Dias DdS (2013), "Posicionamento online baseado no tráfego de máquinas virtuais em redes de data center". Thesis at: Universidade Federal do Rio de Janeiro. Rio de Janeiro
BibTeX:
@phdthesis{dias-posicionamento:2013,
  author = {Dias, Daniel de Souza},
  title = {Posicionamento online baseado no tráfego de máquinas virtuais em redes de data center},
  school = {Universidade Federal do Rio de Janeiro},
  year = {2013},
  url = {http://pee.ufrj.br/teses/textocompleto/2013032201.pdf}
}
Dong YaoZu, Ye W, Jiang YunHong, Pratt I, Ma ShiQing, Li J and Guan HaiBing (2013), "COLO: COarse-grained LOck-stepping virtual machines for non-stop service", In Proceedings of the 4th Annual Symposium on Cloud Computing. , pp. 1-16. ACM Press.
BibTeX:
@inproceedings{dong-colo:2013,
  author = {Dong, YaoZu and Ye, Wei and Jiang, YunHong and Pratt, Ian and Ma, ShiQing and Li, Jian and Guan, HaiBing},
  title = {COLO: COarse-grained LOck-stepping virtual machines for non-stop service},
  booktitle = {Proceedings of the 4th Annual Symposium on Cloud Computing},
  publisher = {ACM Press},
  year = {2013},
  pages = {1--16},
  url = {http://dl.acm.org/citation.cfm?doid=2523616.2523630},
  doi = {10.1145/2523616.2523630}
}
Dósa G, Li R, Han X and Tuza Z (2013), "Tight absolute bound for First Fit Decreasing bin-packing: FFD (L) <= 11/9 OPT (L) + 6/9", Theoretical Computer Science. Vol. 510, pp. 13-61.
Abstract: First Fit Decreasing is a classical bin-packing algorithm: the items are ordered by non-increasing size, and then in this order the next item is always packed into the first bin where it fits. For an instance L let FFD(L) and OPT(L) denote the number of bins used by algorithm FFD and by an optimal algorithm, respectively. In this paper we give the first complete proof of the inequality FFD(L) <= 11/9 × OPT(L) + 6/9. This result is best possible, as was shown earlier by Dósa (2007) [3]. The asymptotic coefficient 11/9 was proved already in 1973 by Johnson, but the tight bound of the additive constant was an open question for four decades.
BibTeX:
@article{dosa-tight:2013,
  author = {Dósa, G. and Li, R. and Han, X. and Tuza, Z.},
  title = {Tight absolute bound for First Fit Decreasing bin-packing: FFD (L) <= 11/9 OPT (L) + 6/9},
  journal = {Theoretical Computer Science},
  year = {2013},
  volume = {510},
  pages = {13--61},
  doi = {10.1016/j.tcs.2013.09.007}
}
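
A compact sketch of the algorithm whose tight bound is established above: sort items in non-increasing order, then place each item into the first bin that can hold it. The 11/9·OPT(L) + 6/9 guarantee bounds the number of bins this produces; the example data below is invented.

# First Fit Decreasing (FFD): sort items by non-increasing size, then
# apply first fit.  FFD(L) <= 11/9 * OPT(L) + 6/9 is the tight bound above.
def ffd(items, capacity=1.0):
    bins = []
    for item in sorted(items, reverse=True):
        for b in bins:
            if sum(b) + item <= capacity:
                b.append(item)
                break
        else:
            bins.append([item])        # no open bin fits: open a new one
    return bins

print(ffd([0.5, 0.7, 0.5, 0.2, 0.4, 0.2, 0.5, 0.1]))   # packs into 4 bins
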
Fernandez R (2013), "O Uso De Softwares Livres Na Gestão Pública De Acervos Informacionais: O Caso Do Koha Nas Bibliotecas De São Bernardo Do Campo", Informação & Informação. Vol. 18(2), pp. 231-248.
BibTeX:
@article{fernandez-usosl:2013,
  author = {Rafael Fernandez},
  title = {O Uso De Softwares Livres Na Gestão Pública De Acervos Informacionais: O Caso Do Koha Nas Bibliotecas De São Bernardo Do Campo},
  journal = {Informação & Informação},
  year = {2013},
  volume = {18},
  number = {2},
  pages = {231--248},
  url = {http://www.uel.br/revistas/uel/index.php/informacao/article/view/16174}
}
Gartner (2013), "Gartner IT Glossary: Term ``Data Center''".
Abstract: The data center is the department in an enterprise that houses and maintains back-end information technology (IT) systems and data stores---its mainframes, servers and databases. In the days of large, centralized IT operations, this department and all the systems resided in one physical place, hence the name data center. With today's more distributed computing methods,...
BibTeX:
@misc{gartner-gartner:2013,
  author = {Gartner},
  title = {Gartner IT Glossary: Term ``Data Center''},
  journal = {Gartner IT Glossary},
  year = {2013}
}
Guo Y, Jones M, Cowan B and Beale R (2013), "Take It Personally: Personal Accountability and Energy Consumption in Domestic Households", In CHI '13 Extended Abstracts on Human Factors in Computing Systems. New York, NY, USA , pp. 1467-1472. ACM.
Abstract: We explore the overlooked area of personal energy consumption in the context of a shared domestic household. We discuss the potential benefits of such an approach. We report the results of a lab study and field trial with four households using a personal energy monitoring system. We describe the results of the studies and discuss how such previously hidden information might raise awareness of individual energy consumption and the benefits and problems this entails.
BibTeX:
@inproceedings{guo-take:2013,
  author = {Guo, Yukang and Jones, Matt and Cowan, Benjamin and Beale, Russell},
  title = {Take It Personally: Personal Accountability and Energy Consumption in Domestic Households},
  booktitle = {CHI '13 Extended Abstracts on Human Factors in Computing Systems},
  publisher = {ACM},
  year = {2013},
  pages = {1467--1472},
  doi = {10.1145/2468356.2468618}
}
Kroll B, Schriegel S, Niggemann O and Schramm S (2013), "A Software Architecture for the Analysis of Energy and Process-Data", In 2013 IEEE 18th Conference on Emerging Technologies Factory Automation (ETFA). , pp. 1-4. IEEE.
Abstract: This paper contributes a framework that helps to fulfill the requirements of the standards DIN EN 16247 and ISO 50001 by combining (i) synchronized data acquisition, (ii) data integration, (iii) learning of normal behavior models and (iv) an implementation of anomaly detection as a prototype. Both standards require reliable data acquisition and energy consumption analysis for implementing a certified energy management system. We show that this framework meets the specifications of the standards by implementing a combined data acquisition and anomaly detection approach.
BibTeX:
@inproceedings{kroll-software:2013,
  author = {Kroll, B. and Schriegel, S. and Niggemann, O. and Schramm, S.},
  title = {A Software Architecture for the Analysis of Energy and Process-Data},
  booktitle = {2013 IEEE 18th Conference on Emerging Technologies Factory Automation (ETFA)},
  publisher = {IEEE},
  year = {2013},
  pages = {1--4},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6648146},
  doi = {10.1109/ETFA.2013.6648146}
}
Marinescu DC (2013), "Cloud Computing: Theory And Practice" Newnes.
Abstract: Cloud Computing: Theory and Practice provides students and IT professionals with an in-depth analysis of the cloud from the ground up. Beginning with a discussion of parallel computing and architectures and distributed systems, the book turns to contemporary cloud infrastructures, how they are being deployed at leading companies such as Amazon, Google and Apple, and how they can be applied in fields such as healthcare, banking and science. The volume also examines how to successfully deploy a cloud application across the enterprise using virtualization, resource management and the right amount of networking support, including content delivery networks and storage area networks. Developers will find a complete introduction to application development provided on a variety of platforms. Readers will learn about recent trends in cloud computing in critical areas such as resource management, security, energy consumption, ethics, and complex systems; get a detailed hands-on set of practical recipes that help simplify the deployment of a cloud-based system, along with an in-depth discussion of several projects; and understand the evolution of cloud computing and why the cloud computing paradigm has a better chance to succeed than previous efforts in large-scale distributed computing.
BibTeX:
@book{marinescu-cloud:2013,
  author = {Marinescu, Dan C.},
  title = {Cloud Computing: Theory And Practice},
  publisher = {Newnes},
  year = {2013}
}
Mhedheb Y, Jrad F, Tao J, Zhao J, Kolodziej J and Streit A (2013), "Load and Thermal-Aware VM Scheduling on the Cloud", In Algorithms and Architectures for Parallel Processing. (8285), pp. 101-114. Springer International Publishing.
Abstract: Virtualization is one of the key technologies that enable cloud computing, a novel computing paradigm aiming at provisioning on-demand computing capacities as services. With the special features of self-service and pay-as-you-use, cloud computing is attracting not only personal users but also small and medium enterprises. By running applications on the cloud, users need not maintain their own servers and thus save administration cost. Cloud computing uses a business model, meaning that the operation overhead must be a major concern of the cloud providers. Today, the payment of a data centre on energy may be larger than the overall investment on the computing, storage and network facilities. Therefore, saving energy consumption is a hot topic not only in cloud computing but also in other domains. This work proposes and implements a virtual machine (VM) scheduling mechanism that targets both load balancing and temperature balancing, with the final goal of reducing the energy consumption in a cloud centre. Using the strategy of VM migration, it is ensured that none of the physical hosts suffers from either high temperature or over-utilization. The proposed scheduling mechanism has been evaluated on CloudSim, a well-known simulator for cloud computing. Initial experimental results show a significant benefit in terms of energy consumption.
BibTeX:
@incollection{mhedheb-load:2013,
  author = {Mhedheb, Yousri and Jrad, Foued and Tao, Jie and Zhao, Jiaqi and Kolodziej, Joanna and Streit, Achim},
  editor = {Kolodziej, Joanna and Martino, Beniamino Di and Talia, Domenico and Xiong, Kaiqi},
  title = {Load and Thermal-Aware VM Scheduling on the Cloud},
  booktitle = {Algorithms and Architectures for Parallel Processing},
  publisher = {Springer International Publishing},
  year = {2013},
  number = {8285},
  pages = {101--114},
  doi = {10.1007/978-3-319-03859-9_8}
}
Molina JAN and Mishra S (2013), "Addressing memory exhaustion failures in Virtual Machines in a cloud environment", In Dependable Systems and Networks (DSN), 2013 43rd Annual IEEE/IFIP International Conference on. , pp. 1-6. IEEE.
BibTeX:
@inproceedings{molina-addressing:2013,
  author = {Molina, Jose Antonio Navas and Mishra, Shivakant},
  title = {Addressing memory exhaustion failures in Virtual Machines in a cloud environment},
  booktitle = {Dependable Systems and Networks (DSN), 2013 43rd Annual IEEE/IFIP International Conference on},
  publisher = {IEEE},
  year = {2013},
  pages = {1--6},
  url = {http://ieeexplore.ieee.org/abstract/document/6575330/}
}
Neto MF (2013), "Os Principais Sistemas De Automação De Data Centers Do Mercado - Dcim". Thesis at: Fazion Ltda. , pp. 17.
BibTeX:
@techreport{neto:2013,
  author = {Neto, Moacyr Franco},
  title = {Os Principais Sistemas De Automação De Data Centers Do Mercado - Dcim},
  school = {Fazion Ltda},
  year = {2013},
  pages = {17},
  url = {http://www.fazion.com.br/netcom2013/moacyrfranco_doc1.pdf}
}
Neto MF (2013), "Os Principais Sistemas De Automação De Data Centers Do Mercado - Dcim: Uma Visão Técnica E Metodologias De Escolha".
BibTeX:
@misc{neto-dcim:2013,
  author = {Neto, Moacyr Franco},
  title = {Os Principais Sistemas De Automação De Data Centers Do Mercado - Dcim: Uma Visão Técnica E Metodologias De Escolha},
  year = {2013}
}
Park S, Choi M-I, Kang B and Park S (2013), "Design and Implementation of Smart Energy Management System for Reducing Power Consumption Using ZigBee Wireless Communication Module", Procedia Computer Science. Vol. 19, pp. 662-668.
Abstract: In this paper, we propose a smart energy management system (SEMS) which provides control using a motion sensor and a configurable time of power usage to reduce power consumption. The SEMS not only supplies power the way common power strips do but also controls the sockets of the SEMS using ZigBee wireless communication. A test bed for the experiment consists of a motion sensor, the SEMS and three appliances connected to the SEMS, and the experiment was conducted for five days to measure the power consumption of the three appliances with regard to both functions. The experimental result shows that the power consumption of the SEMS with the two functions is considerably reduced when compared with the power consumption of the common power strip.
BibTeX:
@article{park-design:2013,
  author = {Park, Sunghoi and Choi, Myeong-In and Kang, Byeongkwan and Park, Sehyun},
  title = {Design and Implementation of Smart Energy Management System for Reducing Power Consumption Using ZigBee Wireless Communication Module},
  journal = {Procedia Computer Science},
  year = {2013},
  volume = {19},
  pages = {662--668},
  url = {http://www.sciencedirect.com/science/article/pii/S1877050913006960},
  doi = {10.1016/j.procs.2013.06.088}
}
Qavami HR, Jamali S, Akbari MK and Javadi B (2013), "Dynamic Resource Provisioning In Cloud Computing : A Heuristic Markovian Approach", Proceedings Of The 4th International Conference On Cloud Computing (cloudcomp 2013), October 17 -- 19, 2013, Wuhan, China.
BibTeX:
@article{qavami-dynamic:2013,
  author = {Qavami, Hamid R. And Jamali, Shahram And Akbari, Mohammad K. And Javadi, Bahman},
  title = {Dynamic Resource Provisioning In Cloud Computing : A Heuristic Markovian Approach},
  journal = {Proceedings Of The 4th International Conference On Cloud Computing (cloudcomp 2013), October 17 -- 19, 2013, Wuhan, China},
  year = {2013},
  url = {http://researchdirect.uws.edu.au/islandora/object/uws%3A19911/}
}
Samarakoon S and Rajini P (2013), "Enablers and Barriers of Implementing ISO 50001 Energy Management Systems (EnMS) in the Sri Lankan Context", In Socio-Economic Sustainability in Construction: Practice, Policy and Research. Colombo, Sri Lanka Vol. 2, pp. 208-217.
BibTeX:
@inproceedings{samarakoon-enablers:2013,
  author = {Samarakoon, S. B. R. G. K. and Rajini, P. A. D.},
  title = {Enablers and Barriers of Implementing ISO 50001 Energy Management Systems (EnMS) in the Sri Lankan Context},
  booktitle = {Socio-Economic Sustainability in Construction: Practice, Policy and Research},
  year = {2013},
  volume = {2},
  pages = {208--217},
  url = {http://www.irbnet.de/daten/iconda/CIB_DC26715.pdf}
}
Sproutboard (2013), "Technical Overview Guides - Sproutboard".
BibTeX:
@misc{sproutboard:2013,
  author = {Sproutboard},
  title = {Technical Overview Guides - Sproutboard},
  year = {2013},
  url = {http://www.sproutboard.com/technical-overview}
}
Strunk A and Dargie W (2013), "Does live migration of virtual machines cost energy?", In Advanced Information Networking and Applications (AINA), 2013 IEEE 27th International Conference on. , pp. 514-521. IEEE.
BibTeX:
@inproceedings{strunk-does:2013,
  author = {Strunk, Anja and Dargie, Waltenegus},
  title = {Does live migration of virtual machines cost energy?},
  booktitle = {Advanced Information Networking and Applications (AINA), 2013 IEEE 27th International Conference on},
  publisher = {IEEE},
  year = {2013},
  pages = {514--521},
  url = {http://ieeexplore.ieee.org/abstract/document/6531798/}
}
Tiaonline.org PR (2013), "TIA Issues New Telecommunications Infrastructure Standard on Data Center Switch Fabrics in Order to Support Cloud Computing Growth".
BibTeX:
@misc{tiaonline.org-tia:2013,
  author = {Tiaonline.org, Press Release},
  title = {TIA Issues New Telecommunications Infrastructure Standard on Data Center Switch Fabrics in Order to Support Cloud Computing Growth},
  journal = {Telecommunications Industry Association (TIA)},
  year = {2013},
  url = {http://www.tiaonline.org/news-media/press-releases/tia-issues-new-telecommunications-infrastructure-standard-data-center}
}
Stansberry M (2013), "Uptime Institute - Data Center Industry Survey 2013".
Abstract: The third annual Uptime Institute Data Center Industry Survey is an in-depth study, collecting responses via email (February to April 2013) from 1,000 data center facilities operators, IT managers and senior executives from around the globe.
BibTeX:
@misc{uptime-dc-survey:2013,
  author = {Stansberry, Matt},
  title = {Uptime Institute - Data Center Industry Survey 2013},
  publisher = {Uptime Institute},
  year = {2013}
}
Wang Y, Lin X, Pedram M, Park S and Chang N (2013), "Optimal Control of a Grid-Connected Hybrid Electrical Energy Storage System for Homes", In Proceedings of the Conference on Design, Automation and Test in Europe. San Jose, CA, USA , pp. 881-886. EDA Consortium.
Abstract: Integrating residential photovoltaic (PV) power generation and electrical energy storage (EES) systems into the smart grid is an effective way of utilizing renewable power and reducing the consumption of fossil fuels. This has become a particularly interesting problem with the introduction of dynamic electricity energy pricing models, since electricity consumers can use their PV-based energy generation and EES systems for peak shaving on their power demand profile from the grid, and thereby minimize their electricity bill. Due to the characteristics of a realistic electricity price function and the energy storage capacity limitation, the control algorithm for a residential EES system should accurately account for various energy loss components during operation. Hybrid electrical energy storage (HEES) systems are proposed to exploit the strengths of each type of EES element and hide its weaknesses, so as to achieve a combination of performance metrics that is superior to those of any of its individual EES components. This paper introduces the problem of how best to utilize a HEES system for a residential smart grid user equipped with PV power generation facilities. The optimal control algorithm for the HEES system is developed, which aims at minimization of the total electricity cost over a billing period under a general electricity energy price function. The proposed algorithm is based on dynamic programming and has polynomial time complexity. Experimental results demonstrate that the proposed HEES system and optimal control algorithm achieve 73.9% average profit enhancement over baseline homogeneous EES systems.
BibTeX:
@inproceedings{wang-optimal:2013,
  author = {Wang, Yanzhi and Lin, Xue and Pedram, Massoud and Park, Sangyoung and Chang, Naehyuck},
  title = {Optimal Control of a Grid-Connected Hybrid Electrical Energy Storage System for Homes},
  booktitle = {Proceedings of the Conference on Design, Automation and Test in Europe},
  publisher = {EDA Consortium},
  year = {2013},
  pages = {881--886}
}
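The abstract above notes that the proposed control algorithm is based on dynamic programming over a billing period. The following toy sketch is not the authors' model; the loss-free storage, the discretized battery levels and the price/demand figures are illustrative assumptions only, but it shows how such a dynamic program can pick a charge schedule that minimizes grid cost.
Illustrative sketch (Python):
def optimal_grid_cost(demand, solar, price, capacity=4, step=1):
    """Toy dynamic program over discretized battery levels (kWh): choose the
    battery trajectory that meets demand at minimum grid cost.
    Loss-free storage; all numbers are illustrative, not the paper's model."""
    levels = list(range(0, capacity + 1, step))
    INF = float("inf")
    cost = {lv: (0.0 if lv == 0 else INF) for lv in levels}  # battery starts empty
    for d, s, p in zip(demand, solar, price):
        nxt_cost = {lv: INF for lv in levels}
        for lv, c in cost.items():
            if c == INF:
                continue
            for nxt in levels:
                grid = max(0.0, d + (nxt - lv) - s)  # energy bought from the grid this slot
                nxt_cost[nxt] = min(nxt_cost[nxt], c + grid * p)
        cost = nxt_cost
    return min(cost.values())

# Two cheap slots followed by an expensive evening slot: charging early pays off.
print(optimal_grid_cost(demand=[1, 1, 3], solar=[0, 2, 0], price=[0.10, 0.10, 0.40]))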
Wijaya TK, Eberle J and Aberer K (2013), "Symbolic Representation of Smart Meter Data", In Proceedings of the Joint EDBT/ICDT 2013 Workshops. New York, NY, USA , pp. 242-248. ACM.
Abstract: Smart meter data analytics has recently received enormous attention because it allows utility companies to analyze customer consumption behavior in real time. However, the amount of data generated by these sensors is very large, so analytics performed on top of it become very expensive. Furthermore, smart meter data contains very detailed energy consumption measurements, which can lead to customer privacy breaches and all the risks associated with them. In this work, we address the problem of how to reduce smart meter data numerosity and its detailed measurement while maintaining its analytics accuracy. We convert the data into a symbolic representation and allow various machine learning algorithms to be performed on top of it. In addition, our symbolic representation has the additional advantage of also allowing algorithms that usually work on nominal and string data to be run on top of smart meter data. We provide an experiment for classification and forecasting tasks using real-world data. Finally, we illustrate several directions to extend our work further.
BibTeX:
@inproceedings{wijaya-symbolic:2013,
  author = {Wijaya, Tri Kurniawan and Eberle, Julien and Aberer, Karl},
  title = {Symbolic Representation of Smart Meter Data},
  booktitle = {Proceedings of the Joint EDBT/ICDT 2013 Workshops},
  publisher = {ACM},
  year = {2013},
  pages = {242--248},
  doi = {10.1145/2457317.2457357}
}
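The symbolic representation described above is in the spirit of SAX-style discretization of a numeric series. The sketch below is a minimal, hypothetical illustration; the window length, alphabet size and breakpoints are assumptions, not parameters taken from the cited paper.
Illustrative sketch (Python):
import numpy as np

def paa(series, segments):
    """Piecewise Aggregate Approximation: mean of each equal-width segment."""
    chunks = np.array_split(np.asarray(series, dtype=float), segments)
    return np.array([c.mean() for c in chunks])

def symbolize(series, segments=8, alphabet="abcd"):
    """Z-normalize, reduce with PAA, then map segment means to symbols using
    equiprobable Gaussian breakpoints (here for a 4-letter alphabet)."""
    x = np.asarray(series, dtype=float)
    x = (x - x.mean()) / (x.std() + 1e-12)
    means = paa(x, segments)
    breakpoints = [-0.674, 0.0, 0.674]  # quartiles of N(0, 1)
    return "".join(alphabet[i] for i in np.searchsorted(breakpoints, means))

# One day of half-hourly smart-meter readings reduced to an 8-symbol word.
print(symbolize(np.random.rand(48)))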
Li X, Jiang X, Ye K and Huang P (2013), "DartCSim+: Enhanced CloudSim with the Power and Network Models Integrated", In 2013 IEEE Sixth International Conference on Cloud Computing. , pp. 644-651. IEEE.
Abstract: CloudSim is one of the most powerful simulation platforms for cloud computing. It supports energy-conscious scheduling and network simulation in the latest version. However, it still faces several limitations: 1) the current CloudSim cannot support both the power model and the network model at the same time; 2) the network components in the current CloudSim do not support power-aware simulation; 3) the simulation of migration does not take into account the network overheads. To overcome these limitations, we design and implement an enhanced cloud simulation platform called DartCSim+ that supports energy-aware network simulation and network-aware live migration. Further, we also implement a resubmit mechanism for packet transmission to provide more realistic network behavior and handle transmission failures caused by migration or network failure. Finally, three groups of experiments are performed to demonstrate the effectiveness of DartCSim+.
BibTeX:
@inproceedings{xiang-li-dartcsim::2013,
  author = {Xiang Li and Xiaohong Jiang and Kejiang Ye and Peng Huang},
  title = {DartCSim+: Enhanced CloudSim with the Power and Network Models Integrated},
  booktitle = {2013 IEEE Sixth International Conference on Cloud Computing},
  publisher = {IEEE},
  year = {2013},
  pages = {644--651},
  url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6676752},
  doi = {10.1109/CLOUD.2013.53}
}
Beamish B (2012), "DCIM Myths and Realities". November, 2012.
BibTeX:
@misc{beamish-dcim:2012,
  author = {Beamish, Brad},
  title = {DCIM Myths and Realities},
  year = {2012}
}
Filho MF and Neto MF (2012), "Data Center Infrastructure Management and Automation Systems: An Evaluation Method". November, 2012.
Abstract: Information technology is growing faster than ever and new challenges appear every day. One of these challenges is data storage and processing, and data centers have a crucial role in this scenario. But as data centers get bigger, more energy is necessary and availability is fundamental. Considering worries about energy costs and availability management, new systems for data center infrastructure automation are under development. This paper presents a proposal for a method to evaluate the hardware and software platforms that compose these systems, called DCIM -- Data Center Infrastructure Management.
BibTeX:
@misc{filho-fazion-dcim:2012,
  author = {Filho, M. F. And Neto, M. F.},
  title = {Data Center Infrastructure Management and Automation Systems: An Evaluation Method},
  journal = {ISCA CAINE},
  year = {2012},
  isbn = {978-1-880843-88-8}
}
Murugesan S and Gangadharan GR (2012), "Harnessing Green IT: Principles and Practices" UK, October, 2012. Wiley.
Abstract: "...ultimately, this is a remarkable book, a practical testimonial, and a comprehensive bibliography rolled into one. It is a single, bright sword cut across the various murky Green IT topics. And if my mistakes and lessons learned through the Green IT journey are any indication, this book will be used every day by folks interested in greening IT." --- Simon Y. Liu, Ph.D. & Ed.D., Editor-in-Chief, IT Professional magazine, IEEE Computer Society; Director, U.S. National Agricultural Library. This book presents a holistic perspective on Green IT by discussing its various facets and showing how to strategically embrace it. Harnessing Green IT: Principles and Practices examines various ways of making computing and information systems greener -- environmentally sustainable -- as well as several means of using information technology (IT) as a tool and an enabler to improve environmental sustainability. The book focuses on both greening of IT and greening by IT -- complementary approaches to attaining environmental sustainability. In a single volume, it comprehensively covers several key aspects of Green IT -- green technologies, design, standards, maturity models, strategies and adoption -- and presents a clear approach to greening IT encompassing green use, green disposal, green design, and green manufacturing. It also illustrates how to strategically apply Green IT in practice in several areas. Key features: comprehensive coverage of key topics of importance and practical relevance (green technologies, design, standards, maturity models, strategies and adoption); several useful approaches to embracing Green IT; chapters written by accomplished experts from industry and academia with first-hand knowledge and expertise in specific areas of Green IT; a set of review and discussion questions for each chapter that helps readers examine and explore the Green IT domain further; and a companion website providing resources for further information and presentation slides. This book will be an invaluable resource for IT professionals, academics, students, researchers, project leaders/managers, IT business executives, CIOs, CTOs and anyone interested in Green IT and harnessing IT to enhance our environment.
BibTeX:
@book{murugesan-harnessing:2012,
  author = {Murugesan, San and Gangadharan, G. R.},
  title = {Harnessing Green IT: Principles and Practices},
  publisher = {Wiley},
  year = {2012},
  edition = {First}
}
Ramsey B, Mullins B and White E (2012), "Improved Tools for Indoor ZigBee Warwalking", In 2012 IEEE 37th Conference on Local Computer Networks Workshops (LCN Workshops)., October, 2012. , pp. 921-924.
Abstract: Secure ZigBee wireless sensor and control networks use 128-bit AES encryption to defend against message sniffing and unauthorized access. However, the low cost and low complexity of ZigBee devices makes them vulnerable to physical attacks such as tampering and network key extraction. Network administrators and penetration testers require tools such as ZBFind to accurately locate ZigBee hardware and evaluate physical security. The open source ZBFind tool estimates distance to ZigBee devices in real time using received signal strength and a distance prediction model. We collect 4500 signal strength measurements along nine walking paths toward ZigBee transmitters in three office buildings. We find that the log-distance path loss model used by ZBFind predicts transmitter distance with 92.5% mean absolute percentage error. We construct an alternative linear model that reduces error to 21%.
BibTeX:
@inproceedings{ramsey-improved:2012,
  author = {Ramsey, B. W. and Mullins, B. E. and White, E. D.},
  title = {Improved Tools for Indoor ZigBee Warwalking},
  booktitle = {2012 IEEE 37th Conference on Local Computer Networks Workshops (LCN Workshops)},
  year = {2012},
  pages = {921--924},
  doi = {10.1109/LCNW.2012.6424083}
}
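The log-distance path loss model mentioned in the abstract is the standard way to map received signal strength to an estimated distance. The snippet below simply inverts that model; the reference power and path loss exponent are placeholder values, not the fitted parameters from the paper.
Illustrative sketch (Python):
def estimate_distance(rssi_dbm, rssi_at_1m_dbm=-40.0, path_loss_exponent=2.0):
    """Invert the log-distance path loss model:
    RSSI(d) = RSSI(1 m) - 10 * n * log10(d)  =>  d = 10 ** ((RSSI(1 m) - RSSI(d)) / (10 * n))."""
    return 10 ** ((rssi_at_1m_dbm - rssi_dbm) / (10.0 * path_loss_exponent))

print(round(estimate_distance(-70.0), 1))  # about 31.6 m with the assumed parameters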
Sanchez OP and Cappellozza A (2012), "Antecedents of the Adoption of Cloud Computing: Effects of Infrastructure, Investment and Size", Revista de Administração Contemporânea., October, 2012. Vol. 16(5), pp. 646-663.
BibTeX:
@article{sanchez-antecedents:2012,
  author = {Sanchez, Otavio Prospero And Cappellozza, Alexandre},
  title = {Antecedents of the Adoption of Cloud Computing: Effects of Infrastructure, Investment and Size},
  journal = {Revista de Administração Contemporânea},
  year = {2012},
  volume = {16},
  number = {5},
  pages = {646--663},
  doi = {10.1590/S1415-65552012000500002}
}
Glanz J (2012), "Data Centers Waste Vast Amounts Of Energy, Belying Industry Image", The New York Times., September, 2012.
Abstract: Helping To Process The Staggering Amount Of Internet Activity That Occurs, Data Centers Waste Vast Amounts Of Energy, Belying The Information Industry's Image Of Environmental Friendliness.
BibTeX:
@article{glanz-data:2012,
  author = {Glanz, James},
  title = {Data Centers Waste Vast Amounts Of Energy, Belying Industry Image},
  journal = {The New York Times},
  year = {2012}
}
Glanz J (2012), "Data Centers In Rural Washington State Gobble Power", The New York Times., September, 2012.
Abstract: When Internet Factories Come To Town, They Can Feel Less Like Their Sleek, Clean And Convenient Image And More Like Old-time Manufacturing.
BibTeX:
@article{glanz-data:2012-1,
  author = {Glanz, James},
  title = {Data Centers In Rural Washington State Gobble Power},
  journal = {The New York Times},
  year = {2012}
}
NIST (2012), "SP 800-30 Rev. 1, Guide for Conducting Risk Assessments". Thesis at: NIST. USA, September, 2012. (SP800-30R1)
Abstract: The purpose of Special Publication 800-30 is to provide guidance for conducting risk assessments of federal information systems and organizations, amplifying the guidance in Special Publication 800-39. Risk assessments, carried out at all three tiers in the risk management hierarchy, are part of an overall risk management process—providing senior leaders/executives with the information needed to determine appropriate courses of action in response to identified risks.
BibTeX:
@techreport{nist-sp:2012,
  author = {NIST},
  title = {SP 800-30 Rev. 1, Guide for Conducting Risk Assessments},
  school = {NIST},
  year = {2012},
  number = {SP800-30R1},
  url = {https://csrc.nist.gov/publications/detail/sp/800-30/rev-1/final}
}
Wang W, Chen H and Chen X (2012), "An Availability-Aware Virtual Machine Placement Approach for Dynamic Scaling of Cloud Applications", In 2012 9th International Conference on Ubiquitous Intelligence and Computing and 9th International Conference on Autonomic and Trusted Computing., September, 2012. , pp. 509-516.
Abstract: Cloud computing promises customers the on-demand ability to dynamically provision virtualization resources in face of workload variations. Most existing scaling approaches addressed this problem by allocating application to a certain amount of cloud resources. However, the problem of the availability of application influenced by VM-based physical locations during resource scaling process is a serious challenge due to dynamic complex workload and has not been widely discussed yet. In this paper, we present a novel availability-based computing model to describe availability attribute of one application in the hierarchical structured cloud. Moreover, we propose an availability-aware policy by performing both vertical and horizontal scaling to explore how and where to allocate computing resource. Simulation results indicate that our model captured the availability of cloud applications properly and proposed scaling approach achieves the objectives of meeting availability demands and minimizing the total communication cost.
BibTeX:
@inproceedings{wang-availability-aware:2012,
  author = {Wang, W. and Chen, H. and Chen, X.},
  title = {An Availability-Aware Virtual Machine Placement Approach for Dynamic Scaling of Cloud Applications},
  booktitle = {2012 9th International Conference on Ubiquitous Intelligence and Computing and 9th International Conference on Autonomic and Trusted Computing},
  year = {2012},
  pages = {509--516},
  doi = {10.1109/UIC-ATC.2012.31}
}
Bauer E and Adams R (2012), "Reliability and Availability of Cloud Computing" Piscataway, NJ : Hoboken, NJ, August, 2012. Wiley-IEEE Press.
Abstract: A holistic approach to service reliability and availability of cloud computing Reliability and Availability of Cloud Computing provides IS/IT system and solution architects, developers, and engineers with the knowledge needed to assess the impact of virtualization and cloud computing on service reliability and availability. It reveals how to select the most appropriate design for reliability diligence to assure that user expectations are met. Organized in three parts (basics, risk analysis, and recommendations), this resource is accessible to readers of diverse backgrounds and experience levels. Numerous examples and more than 100 figures throughout the book help readers visualize problems to better understand the topic—and the authors present risks and options in bulleted lists that can be applied directly to specific applications/problems. Special features of this book include: Rigorous analysis of the reliability and availability risks that are inherent in cloud computing Simple formulas that explain the quantitative aspects of reliability and availability Enlightening discussions of the ways in which virtualized applications and cloud deployments differ from traditional system implementations and deployments Specific recommendations for developing reliable virtualized applications and cloud-based solutions Reliability and Availability of Cloud Computing is the guide for IS/IT staff in business, government, academia, and non-governmental organizations who are moving their applications to the cloud. It is also an important reference for professionals in technical sales, product management, and quality management, as well as software and quality engineers looking to broaden their expertise.
BibTeX:
@book{bauer-reliability:2012,
  author = {Bauer, Eric and Adams, Randee},
  title = {Reliability and Availability of Cloud Computing},
  publisher = {Wiley-IEEE Press},
  year = {2012},
  edition = {1 edition}
}
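The "simple formulas" for availability that the book description refers to commonly reduce, in the steady state, to A = MTBF / (MTBF + MTTR). A minimal sketch with illustrative numbers (not figures taken from the book):
Illustrative sketch (Python):
def availability(mtbf_hours, mttr_hours):
    """Steady-state availability A = MTBF / (MTBF + MTTR)."""
    return mtbf_hours / (mtbf_hours + mttr_hours)

def downtime_minutes_per_year(a):
    """Expected unavailability expressed as minutes of downtime per year."""
    return (1.0 - a) * 365.25 * 24 * 60

a = availability(mtbf_hours=10000, mttr_hours=4)  # illustrative numbers only
print(f"A = {a:.5f}, downtime = {downtime_minutes_per_year(a):.0f} min/year")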
Feller E, Rohr C, Margery D and Morin C (2012), "Energy Management in IaaS Clouds: A Holistic Approach", In 2012 IEEE Fifth International Conference on Cloud Computing., June, 2012. , pp. 204-212.
Abstract: Energy efficiency has now become one of the major design constraints for current and future cloud data center operators. One way to conserve energy is to transition idle servers into a lower power-state (e.g. suspend). Therefore, virtual machine (VM) placement and dynamic VM scheduling algorithms are proposed to facilitate the creation of idle times. However, these algorithms are rarely integrated in a holistic approach and experimentally evaluated in a realistic environment. In this paper we present the energy management algorithms and mechanisms of a novel holistic energy-aware VM management framework for private clouds called Snooze. We conduct an extensive evaluation of the energy and performance implications of our system on 34 power-metered machines of the Grid'5000 experimentation testbed under dynamic web workloads. The results show that the energy saving mechanisms allow Snooze to dynamically scale data center energy consumption proportionally to the load, thus achieving substantial energy savings with only limited impact on application performance.
BibTeX:
@inproceedings{feller-energy:2012,
  author = {Feller, E. and Rohr, C. and Margery, D. and Morin, C.},
  title = {Energy Management in IaaS Clouds: A Holistic Approach},
  booktitle = {2012 IEEE Fifth International Conference on Cloud Computing},
  year = {2012},
  pages = {204--212},
  doi = {10.1109/CLOUD.2012.50}
}
Eiland R, Fernandes J, Gebrehiwot B, Vallejo M, Agonafer D and Mulay V (2012), "Air Filter Effects on Data Center Supply Fan Power", In 2012 13th IEEE Intersociety Conference on Thermal and Thermomechanical Phenomena in Electronic Systems (ITherm)., May, 2012. , pp. 377-384.
Abstract: Driven by the need to reduce energy use, many new data centers are built with air-side economizers. This method of bringing in outside air as the primary cooling resource greatly reduces overall facility power by eliminating the need for the compressors and pumps required by chilled water and refrigerant based cooling systems. However, system fans are still necessary to supply and move large amounts of air through the facility. The operating power of fans is dependent on the static pressure which must be overcome to move air. The static pressure of a data center is generally fixed for a given configuration, with the exception of air filters, which become clogged over time. In this work, airflow versus pressure drop curves of filters removed after eight and a half months of service in a large data center operating an air-side economizer are experimentally characterized. Comparison of this data against clean filters provides insight into the change in pressure drop of both low-efficiency pleated pre-filters and high-efficiency cartridge final filters as they become dirty. Incorporating these system resistance curves into the power curve of the data center supply fans indicates only a minimal increase in energy use resulting from dust and particle collection by the filters.
BibTeX:
@inproceedings{eiland-air:2012,
  author = {Eiland, R. and Fernandes, J. and Gebrehiwot, B. and Vallejo, M. and Agonafer, D. and Mulay, V.},
  title = {Air Filter Effects on Data Center Supply Fan Power},
  booktitle = {2012 13th IEEE Intersociety Conference on Thermal and Thermomechanical Phenomena in Electronic Systems (ITherm)},
  year = {2012},
  pages = {377--384},
  doi = {10.1109/ITHERM.2012.6231454}
}
Russell L, Steele A and Goubran R (2012), "Low-Cost, Rapid Prototyping of IMU and Pressure Monitoring System Using an Open Source Hardware Design", In Instrumentation and Measurement Technology Conference (I2MTC), 2012 IEEE International., May, 2012. , pp. 2695-2699.
Abstract: Open source hardware is a type of hardware where the schematics and designs are made unrestricted and available to all. They are often accompanied by open source software. This can bring reliability, ease of debugging, and modular development for rapid prototyping using pre-written libraries. Merits of using open source hardware are discussed and then applied to a portable sensor system based on the open hardware Arduino-derived JeeNode microcontroller board. The system uses an inertial measurement unit (IMU) and seamless integration of other sensors, including a piezo-resistive pressure sensor. It is shown that open source hardware can help to increase rapid development, reduce costs, and encourage further development.
BibTeX:
@inproceedings{russell-low-cost:2012,
  author = {Russell, L. and Steele, A. L. and Goubran, R.},
  title = {Low-Cost, Rapid Prototyping of IMU and Pressure Monitoring System Using an Open Source Hardware Design},
  booktitle = {Instrumentation and Measurement Technology Conference (I2MTC), 2012 IEEE International},
  year = {2012},
  pages = {2695--2699},
  doi = {10.1109/I2MTC.2012.6229719}
}
Arno R, Friedl A, Gross P and Schuerger R (2012), "Reliability of Data Centers by Tier Classification", IEEE Transactions on Industry Applications., March, 2012. , pp. 777-783.
Abstract: When the concept of reliability began to formally become an integrated engineering approach in the 1950s, reliability was associated with failure rate. Today the term "reliability" is used as an umbrella definition covering a variety of subjects including availability, durability, quality, and sometimes the function of the product. Reliability engineering was developed to quantify "how reliable" a component, product, or system was when used in a specific application for a specific period of time. The data center industry has come to rely on "tier classifications", as presented in a number of papers by the Uptime Institute, as a gradient scale of data center configurations and requirements from least (Tier 1) to most reliable (Tier 4). This paper applies the principles and modeling techniques of reliability engineering to specific examples of each of the tier classifications and discusses the results. A review of the metrics of reliability engineering being used is also included.
BibTeX:
@article{arno-reliability:2012,
  author = {Arno, R. and Friedl, A. and Gross, P. and Schuerger, R. J.},
  title = {Reliability of Data Centers by Tier Classification},
  journal = {IEEE Transactions on Industry Applications},
  year = {2012},
  pages = {777--783},
  doi = {10.1109/TIA.2011.2180872}
}
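Tier comparisons of this kind typically combine component availabilities through series blocks (no redundancy) and parallel blocks (redundant paths). The sketch below shows the two compositions with made-up component availabilities; it illustrates the general technique only, not the models used in the paper.
Illustrative sketch (Python):
def series(*avail):
    """All components must be up: A = product of A_i."""
    a = 1.0
    for x in avail:
        a *= x
    return a

def parallel(*avail):
    """At least one redundant path must be up: A = 1 - product of (1 - A_i)."""
    u = 1.0
    for x in avail:
        u *= 1.0 - x
    return 1.0 - u

utility, ups, pdu = 0.999, 0.9999, 0.99995      # illustrative component availabilities
single_path = series(utility, ups, pdu)         # a non-redundant power chain
dual_path = parallel(single_path, single_path)  # a 2N-style redundant chain
print(f"single path: {single_path:.5f}, dual path: {dual_path:.7f}")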
Toeroe M and Tam F (2012), "Service Availability: Principles and Practice", March, 2012. John Wiley & Sons.
Abstract: Our society increasingly depends on computer-based systems; the number of applications deployed has increased dramatically in recent years and this trend is accelerating. Many of these applications are expected to provide their services continuously. The Service Availability Forum has recognized this need and developed a set of specifications to help software designers and developers to focus on the value added function of applications, leaving the availability management functions for the middleware. A practical and informative reference for the Service Availability Forum specifications, this book gives a cohesive explanation of the founding principles, motivation behind the design of the specifications, and the solutions, usage scenarios and limitations that a final system may have. Avoiding complex mathematical explanations, the book takes a pragmatic approach by discussing issues that are as close as possible to the daily software design/development by practitioners, and yet at a level that still takes in the overall picture. As a result, practitioners will be able to use the specifications as intended. Takes a practical approach, giving guidance on the use of the specifications to explain the architecture, redundancy models and dependencies of the Service Availability (SA) Forum services Explains how service availability provides fault tolerance at the service level Clarifies how the SA Forum solution is supported by open source implementations of the middleware Includes fragments of code, simple example and use cases to give readers a practical understanding of the topic Provides a stepping stone for applications and system designers, developers and advanced students to help them understand and use the specifications
BibTeX:
@book{toeroe-service:2012,
  author = {Toeroe, Maria and Tam, Francis},
  title = {Service Availability: Principles and Practice},
  publisher = {John Wiley & Sons},
  year = {2012}
}
Chen M, Vasilakos AV and Grace D (2012), "Advances in Green Mobile Networks", Mobile Networks and Applications., February, 2012. Vol. 17(1), pp. 1-3.
BibTeX:
@article{chen-advances:2012,
  author = {Chen, Min and Vasilakos, Athanasios V. and Grace, David},
  title = {Advances in Green Mobile Networks},
  journal = {Mobile Networks and Applications},
  year = {2012},
  volume = {17},
  number = {1},
  pages = {1--3},
  doi = {10.1007/s11036-011-0346-y}
}
Wang X, Vasilakos AV, Chen M, Liu Y and Kwon TT (2012), "A Survey of Green Mobile Networks: Opportunities and Challenges", Mobile Networks and Applications., February, 2012. , pp. 4-20.
Abstract: The explosive development of information and communication technology (ICT) has significantly enlarged both the energy demands and the CO2 emissions, and consequently contributes to making the energy crisis and global warming problems worse. However, as the main force of the ICT field, mobile networks are currently focusing on the capacity, variety and stability of communication services, without paying much concern to energy efficiency. The escalating energy costs and environmental concerns have already created an urgent need for more energy-efficient ``green'' wireless communications. In this paper, we survey and discuss various remarkable techniques toward green mobile networks to date, mainly targeting mobile cellular networks. We also summarize the current research projects related to green mobile networks, along with a taxonomy of energy-efficiency metrics. We finally discuss and elaborate future research opportunities and design challenges for green mobile networks.
BibTeX:
@article{wang-survey:2012,
  author = {Wang, Xiaofei and Vasilakos, Athanasios V. and Chen, Min and Liu, Yunhao and Kwon, Ted Taekyoung},
  title = {A Survey of Green Mobile Networks: Opportunities and Challenges},
  journal = {Mobile Networks and Applications},
  year = {2012},
  pages = {4--20},
  doi = {10.1007/s11036-011-0316-4}
}
Ganesh L (2012), "Data Center Energy Management". Thesis at: Cornell. Ithaca, New York, USA, January, 2012.
Abstract: Data centers form the underpinnings of the global technology revolution that is cloud computing. There is enormous pressure for data center growth and expansion to meet the computational demands of an increasingly digital world. With energy costs overtaking server costs in data centers, energy is fast becoming a significant bottleneck to data center scale-out. Further, the global data center energy footprint is growing to be a significant burden on the world's energy resources. Yet energy is a signally ill-managed resource in most data centers; average data center energy efficiency is less than 50%. With increasing industry awareness of the magnitude and urgency of this problem, many solutions are cropping up to combat each of the several sources of data center energy inefficiency. The objective of this dissertation is three-fold: first, we examine the causes of data center energy inefficiency from first principles, and identify the challenges involved in addressing them. We find two categories of energy inefficiency: idle resource energy consumption, and support infrastructure energy consumption. Second, we present solutions to address each form of inefficiency. We describe two ways to combat idle resource energy consumption, and also present a systemic solution to tackle both forms of energy inefficiency. Finally, throughout this dissertation, we examine the related work and literature, and attempt to map them into the solution space to identify how the solutions relate to each other, and what gaps remain to be addressed. The cloud has the potential to enable everything from ubiquitous computing and universal access to knowledge, to smart power grids, greater social connectivity, and near-infinite extensibility of compute/storage power. The cloud turns computation into a utility, and by doing so, has the potential to make it accessible to a much larger part of the world. This dissertation explores ways to enable sustainable scaling of the data centers that power the cloud and enable this vision.
BibTeX:
@phdthesis{ganesh-data:2012,
  author = {Ganesh, Lakshmi},
  title = {Data Center Energy Management},
  school = {Cornell},
  year = {2012},
  url = {http://fireless.cs.cornell.edu/publications/thesis_lakshmi.pdf}
}
Liu J and Terzis A (2012), "Sensing Data Centers for Energy Efficiency", Philosophical Transactions of the Royal Society A., January, 2012. Vol. 370(1958), pp. 136-157.
Abstract: Data centers are large energy consumers today and their consumption is expected to increase further, driven by the growth in cloud services. The large costs and the environmental impact of this consumption have motivated data center operators to optimize data center operations. We argue that one of the underlying reasons for the low energy utilization is the lack of visibility into a data center's highly dynamic operating conditions. Wireless sensor networks promise to remove this veil of uncertainty by delivering large volumes of data collected at high spatial and temporal fidelities. The paper summarizes data center operations in order to describe the parameters that a data center sensing network would need to collect and motivate the challenges that such a network would face. We present technical approaches for the problems of data collection and management and close with an overview of Data Center Genome, an end-to-end data center sensing system.
BibTeX:
@article{liu-microsoft:2012,
  author = {Jie Liu and Andreas Terzis},
  title = {Sensing Data Centers for Energy Efficiency},
  journal = {Philosophical Transactions of the Royal Society A},
  volume = {370},
  number = {1958},
  pages = {136--157},
  year = {2012}
}
Alaraifi A (2012), "The Application and Impact of Sensor Based Information Systems in Data Centers: A Literature Review", Procedia Engineering. Vol. 41, pp. 819-826.
Abstract: The demand for and on data centers continues to pose several power, cooling, and performance constraints associated with operational, economic and environmental inefficiency. Sensor based information systems (SBIS) are one of the best practices for addressing these constraints. The aim of the paper is to review the research on the applications of SBIS in data centers and discuss the opportunities for utilizing SBIS to support the business functions of data centers, including the management of cooling, power delivery and computing platforms. Although the use of sensors to monitor temperature, smoke, heat and security is considered an old practice in data centers, the full utilization and integration of these sensors into information systems to automate data centre management functions, inform decision making in data centre management and transform data centers to improve their operational, economic and environmental performance appears to be very limited. The paper reviews the current literature and concludes that there is a dearth of empirical studies that focus on the use of SBIS and the benefits of SBIS to data centers. Thus, the paper calls for more theoretical and empirical research to investigate the utilization of SBIS to manage data centers' platforms and its impact on data center performance.
BibTeX:
@article{alaraifi-application:2012,
  author = {Alaraifi, Adel},
  title = {The Application and Impact of Sensor Based Information Systems in Data Centers: A Literature Review},
  journal = {Procedia Engineering},
  year = {2012},
  volume = {41},
  pages = {819--826},
  doi = {10.1016/j.proeng.2012.07.249}
}
Beloglazov A, Abawajy J and Buyya R (2012), "Energy-Aware Resource Allocation Heuristics for Efficient Management of Data Centers for Cloud Computing", Future Gener. Comput. Syst.. Vol. 28(5), pp. 755-768.
Abstract: Cloud computing offers utility-oriented IT services to users worldwide. Based on a pay-as-you-go model, it enables hosting of pervasive applications from consumer, scientific, and business domains. However, data centers hosting cloud applications consume huge amounts of electrical energy, contributing to high operational costs and carbon footprints to the environment. Therefore, we need green cloud computing solutions that can not only minimize operational costs but also reduce the environmental impact. In this paper, we define an architectural framework and principles for energy-efficient cloud computing. Based on this architecture, we present our vision, open research challenges, and resource provisioning and allocation algorithms for energy-efficient management of cloud computing environments. The proposed energy-aware allocation heuristics provision data center resources to client applications in a way that improves energy efficiency of the data center, while delivering the negotiated quality of service (QoS). In particular, in this paper we conduct a survey of research in energy-efficient computing and propose: (a) architectural principles for energy-efficient management of clouds; (b) energy-efficient resource allocation policies and scheduling algorithms considering QoS expectations and power usage characteristics of the devices; and (c) a number of open research challenges, addressing which can bring substantial benefits to both resource providers and consumers. We have validated our approach by conducting a performance evaluation study using the CloudSim toolkit. The results demonstrate that the cloud computing model has immense potential as it offers significant cost savings and demonstrates high potential for the improvement of energy efficiency under dynamic workload scenarios.
BibTeX:
@article{beloglazov-energy-aware:2012,
  author = {Beloglazov, Anton and Abawajy, Jemal and Buyya, Rajkumar},
  title = {Energy-Aware Resource Allocation Heuristics for Efficient Management of Data Centers for Cloud Computing},
  journal = {Future Gener. Comput. Syst.},
  year = {2012},
  volume = {28},
  number = {5},
  pages = {755--768},
  doi = {10.1016/j.future.2011.04.017}
}
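Work in this area frequently approximates server power as a linear function of CPU utilization between idle and peak draw. The sketch below uses that common approximation with placeholder wattages; the numbers and the five-minute sampling interval are assumptions, not measurements from the paper.
Illustrative sketch (Python):
def host_power_watts(cpu_utilization, p_idle=170.0, p_max=250.0):
    """Linear server power model: P(u) = P_idle + (P_max - P_idle) * u, u in [0, 1]."""
    u = min(max(cpu_utilization, 0.0), 1.0)
    return p_idle + (p_max - p_idle) * u

def energy_kwh(utilization_trace, interval_s=300):
    """Integrate the power model over a trace of utilization samples."""
    joules = sum(host_power_watts(u) * interval_s for u in utilization_trace)
    return joules / 3.6e6

print(round(energy_kwh([0.2, 0.5, 0.9, 0.0]), 3))  # kWh for four 5-minute samples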
Beloglazov A and Buyya R (2012), "Optimal Online Deterministic Algorithms and Adaptive Heuristics for Energy and Performance Efficient Dynamic Consolidation of Virtual Machines in Cloud Data Centers", Concurrency Computat.: Pract. Exper.. Vol. 24(13), pp. 1397-1420.
Abstract: The rapid growth in demand for computational power driven by modern service applications combined with the shift to the cloud computing model have led to the establishment of large-scale virtualized data centers. Such data centers consume enormous amounts of electrical energy resulting in high operating costs and carbon dioxide emissions. Dynamic consolidation of virtual machines (VMs) using live migration and switching idle nodes to the sleep mode allows cloud providers to optimize resource usage and reduce energy consumption. However, the obligation of providing high quality of service to customers leads to the necessity of dealing with the energy-performance trade-off, as aggressive consolidation may lead to performance degradation. Because of the variability of workloads experienced by modern applications, the VM placement should be optimized continuously in an online manner. To understand the implications of the online nature of the problem, we conduct a competitive analysis and prove competitive ratios of optimal online deterministic algorithms for the single VM migration and dynamic VM consolidation problems. Furthermore, we propose novel adaptive heuristics for dynamic consolidation of VMs based on an analysis of historical data from the resource usage by VMs. The proposed algorithms significantly reduce energy consumption, while ensuring a high level of adherence to the service level agreement. We validate the high efficiency of the proposed algorithms by extensive simulations using real-world workload traces from more than a thousand PlanetLab VMs. Copyright © 2011 John Wiley & Sons, Ltd.
BibTeX:
@article{beloglazov-optimal:2012,
  author = {Beloglazov, Anton and Buyya, Rajkumar},
  title = {Optimal Online Deterministic Algorithms and Adaptive Heuristics for Energy and Performance Efficient Dynamic Consolidation of Virtual Machines in Cloud Data Centers},
  journal = {Concurrency Computat.: Pract. Exper.},
  year = {2012},
  volume = {24},
  number = {13},
  pages = {1397--1420},
  doi = {10.1002/cpe.1867}
}
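Among the adaptive overload-detection ideas discussed in this line of work is deriving the upper utilization threshold from the median absolute deviation (MAD) of a host's recent utilization. The sketch below is an illustrative rendering with an assumed safety parameter, not the authors' implementation.
Illustrative sketch (Python):
import statistics

def mad(values):
    """Median absolute deviation of a utilization history."""
    med = statistics.median(values)
    return statistics.median(abs(v - med) for v in values)

def is_host_overloaded(cpu_history, safety=2.5):
    """Adaptive upper threshold: 1 - safety * MAD(history); the safety factor is assumed."""
    threshold = 1.0 - safety * mad(cpu_history)
    return cpu_history[-1] >= threshold

history = [0.62, 0.70, 0.66, 0.74, 0.93]
print(is_host_overloaded(history))  # True: the threshold is 0.90 for this history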
Bhagwat H, Singh A, Vasan A and Sivasubramaniam A (2012), "Thermal Influence Indices: Causality Metrics for Efficient Exploration of Data Center Cooling", In IGCC 2012.
Abstract: Cooling is an important issue in data center design and operation. Accurate evaluation of a design or operational parameter choice for cooling is difficult as it requires several runs of computationally intensive computational fluid dynamics (CFD) based models. Therefore there is need for an exploration method that does not incur enormous computation. In addition, the exploration should also provide insights that enable informed decision making. Given these twin goals of reduced computation and improved insights, we present a novel approach to data center cooling exploration. The key idea is to do a local search around the current design/operation of a data center to obtain better design/operation parameters subject to the desired constraints. To do this, all the microscopic information about airflow and temperature in the data center available from a single run of CFD computation is converted into macroscopic metrics called influence indices. The influence indices, which characterize the causal relationship between heat sources and sinks, are used to refine the design/operation of the data center either manually or programmatically. New designs are evaluated with further CFD runs to compute new influence indices and the process is repeated to yield improved designs as per the computation budget available. We have carried out design exploration of a realistic data center using this methodology. Specifically, we considered maximization of the heat load in the data center subject to the constraints that: 1) servers are kept at appropriate temperatures and 2) overloading of CRACs is avoided. Our evaluation shows that the use of influence indices cuts down the exploration time by 80% for a 1500 sq. ft. data center.
BibTeX:
@inproceedings{bhagwat-thermal:2012,
  author = {Bhagwat, H. and Singh, A. and Vasan, A. and Sivasubramaniam, Anand},
  title = {Thermal Influence Indices: Causality Metrics for Efficient Exploration of Data Center Cooling},
  booktitle = {IGCC 2012},
  year = {2012},
  doi = {10.1109/IGCC.2012.6322254}
}
Capone V, Esposito R, Pardi S, Taurino F and Tortone G (2012), "Design and implementation of a reliable and cost-effective cloud computing infrastructure: the INFN Napoli experience", J. Phys.: Conf. Ser.. Vol. 396(4), pp. 042012.
Abstract: Over the last few years we have seen an increasing number of services and applications needed to manage and maintain cloud computing facilities. This is particularly true for computing in high energy physics, which often requires complex configurations and distributed infrastructures. In this scenario a cost effective rationalization and consolidation strategy is the key to success in terms of scalability and reliability. In this work we describe an IaaS (Infrastructure as a Service) cloud computing system, with high availability and redundancy features, which is currently in production at INFN-Naples and ATLAS Tier-2 data centre. The main goal we intended to achieve was a simplified method to manage our computing resources and deliver reliable user services, reusing existing hardware without incurring heavy costs. A combined usage of virtualization and clustering technologies allowed us to consolidate our services on a small number of physical machines, reducing electric power costs. As a result of our efforts we developed a complete solution for data and computing centres that can be easily replicated using commodity hardware. Our architecture consists of 2 main subsystems: a clustered storage solution, built on top of disk servers running GlusterFS file system, and a virtual machines execution environment. GlusterFS is a network file system able to perform parallel writes on multiple disk servers, providing this way live replication of data. High availability is also achieved via a network configuration using redundant switches and multiple paths between hypervisor hosts and disk servers. We also developed a set of management scripts to easily perform basic system administration tasks such as automatic deployment of new virtual machines, adaptive scheduling of virtual machines on hypervisor hosts, live migration and automated restart in case of hypervisor failures.
BibTeX:
@article{capone-design:2012,
  author = {Capone, V. and Esposito, R. and Pardi, S. and Taurino, F. and Tortone, G.},
  title = {Design and implementation of a reliable and cost-effective cloud computing infrastructure: the INFN Napoli experience},
  journal = {J. Phys.: Conf. Ser.},
  year = {2012},
  volume = {396},
  number = {4},
  pages = {042012},
  url = {http://stacks.iop.org/1742-6596/396/i=4/a=042012},
  doi = {10.1088/1742-6596/396/4/042012}
}
Caraman MC, Moraru SA, Dan S and Grama C (2012), "Continuous disaster tolerance in the IaaS clouds", In Optimization of Electrical and Electronic Equipment (OPTIM), 2012 13th International Conference on. , pp. 1226-1232. IEEE.
BibTeX:
@inproceedings{caraman-continuous:2012,
  author = {Caraman, Mihai Claudiu and Moraru, Sorin Aurel and Dan, Stefan and Grama, Catalin},
  title = {Continuous disaster tolerance in the IaaS clouds},
  booktitle = {Optimization of Electrical and Electronic Equipment (OPTIM), 2012 13th International Conference on},
  publisher = {IEEE},
  year = {2012},
  pages = {1226--1232},
  url = {http://ieeexplore.ieee.org/abstract/document/6231987/}
}
Chan H and Chieu T (2012), "An Approach to High Availability for Cloud Servers with Snapshot Mechanism", In Proceedings of the Industrial Track of the 13th ACM/IFIP/USENIX International Middleware Conference. New York, NY, USA , pp. 6:1-6:6. ACM.
Abstract: Virtualization technologies enable the execution of multiple virtual machine instances (VMs) with different operating systems (OSs) on the same physical host. Each VM instance functions independently as an isolated system with its own physical resources, OS and applications. Due to significant cost saving and efficiency, the virtualization model has been increasingly adapted by enterprises and service providers as their main computing and service delivery infrastructure, running critical internal business and external customer facing applications. To minimize down time due to unexpected VM crashes, a high availability or backup system is usually built into the infrastructure. There are many high availability technology options available such as replication, mirroring and fail over clustering. Most of these solutions are usually designed based on the traditional computing model, they are costly to implement, complicated and tedious to maintain, especially in a virtualized environment, and they often require additional expensive hardware and software components. In this paper, we introduce a simple, flexible, scalable, extensible, efficient and cost effective system which utilizes the current VM infrastructure and common utilities to provide a high availability solution in the virtualization environment. Our smart adaptive snapshot replication technique provides a smooth and reliable mechanism for cost-performance, wherein the amount of resources allocated for high availability solution can be adjusted based on available resources, utilization and customer requirements.
BibTeX:
@inproceedings{chan-approach:2012,
  author = {Chan, Hoi and Chieu, Trieu},
  title = {An Approach to High Availability for Cloud Servers with Snapshot Mechanism},
  booktitle = {Proceedings of the Industrial Track of the 13th ACM/IFIP/USENIX International Middleware Conference},
  publisher = {ACM},
  year = {2012},
  pages = {6:1--6:6},
  url = {http://doi.acm.org/10.1145/2405146.2405152},
  doi = {10.1145/2405146.2405152}
}
Corradi A, Fanelli M and Foschini L (2012), "VM Consolidation: A Real Case Based on OpenStack Cloud", Future Generation Computer Systems. Vol. 32, pp. 118-127.
Abstract: In recent years, cloud computing has been emerging as the next big revolution in both computer networks and web provisioning. Because of raised expectations, several vendors, such as Amazon and IBM, started designing, developing, and deploying cloud solutions to optimize the usage of their own data centers, and some open-source solutions are also underway, such as Eucalyptus and OpenStack. Cloud architectures exploit virtualization techniques to provision multiple virtual machines (VMs) on the same physical host, so as to efficiently use available resources, for instance, to consolidate VMs in the minimal number of physical servers to reduce the runtime power consumption. VM consolidation has to carefully consider the aggregated resource consumption of co-located VMs, in order to avoid performance reductions and service level agreement (SLA) violations. While various works have already treated the VM consolidation problem from a theoretical perspective, this paper focuses on it from a more practical viewpoint, with specific attention to the consolidation aspects related to power, CPU, and networking resource sharing. Moreover, the paper proposes a cloud management platform to optimize VM consolidation along three main dimensions, namely power consumption, host resources, and networking. Reported experimental results point out that interferences between co-located VMs have to be carefully considered to avoid placement solutions that, although being feasible from a more theoretical viewpoint, cannot ensure VM provisioning with SLA guarantees.
BibTeX:
@article{corradi-vm:2012,
  author = {Corradi, Antonio and Fanelli, Mario and Foschini, Luca},
  title = {VM Consolidation: A Real Case Based on OpenStack Cloud},
  journal = {Future Generation Computer Systems},
  year = {2012},
  volume = {32},
  pages = {118--127},
  url = {http://www.sciencedirect.com/science/article/pii/S0167739X12001082},
  doi = {10.1016/j.future.2012.05.012}
}
Deshpande U, Kulkarni U and Gopalan K (2012), "Inter-rack Live Migration of Multiple Virtual Machines", In Proceedings of the 6th international workshop on Virtualization Technologies in Distributed Computing Date. New York, NY, USA , pp. 19-26. ACM.
Abstract: Within datacenters, often multiple virtual machines (VMs) need to be live migrated simultaneously for various reasons such as maintenance, power savings, and load balancing. Such mass simultaneous live migration of multiple VMs can trigger large data transfers across the core network links and switches, and negatively affect the cluster-wide performance of network-bound applications. In this paper, we present a distributed system for inter-rack live migration (IRLM), i.e., parallel live migration of multiple VMs across racks. The key performance objective of IRLM is to reduce the traffic load on the core network links during mass VM migration through distributed deduplication of VMs' memory images. We present an initial prototype of IRLM that migrates multiple QEMU/KVM VMs within a Gigabit Ethernet cluster with 10GigE core links. We also present preliminary evaluation on a small testbed having 6 hosts per rack and 4 VMs per host. Our evaluations show that, compared to the default live migration technique in QEMU/KVM, IRLM reduces the network traffic on core links by up to 44% and the total migration time by up to 26%. We also demonstrate that network-bound applications experience a smaller degradation during migration using IRLM.
BibTeX:
@inproceedings{deshpande-inter-rack:2012,
  author = {Deshpande, Umesh and Kulkarni, Unmesh and Gopalan, Kartik},
  title = {Inter-rack Live Migration of Multiple Virtual Machines},
  booktitle = {Proceedings of the 6th international workshop on Virtualization Technologies in Distributed Computing Date},
  publisher = {ACM},
  year = {2012},
  pages = {19--26},
  url = {http://dl.acm.org/citation.cfm?id=2287062},
  doi = {10.1145/2287056.2287062}
}
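The core idea of distributed deduplication during mass migration is to transfer each distinct memory page once over the core links and to send only references for later duplicates. A minimal, hypothetical sketch; the page size, hashing choice and data layout are assumptions, not IRLM's actual design.
Illustrative sketch (Python):
import hashlib

def page_digest(page_bytes):
    """Content hash used to detect identical memory pages across co-migrating VMs."""
    return hashlib.sha1(page_bytes).digest()

def dedup_pages(vm_pages):
    """vm_pages: dict of vm_id -> list of 4 KiB page byte strings (toy layout).
    Returns (pages transferred in full, pages replaced by references)."""
    seen, transfers, references = {}, 0, 0
    for vm_id, pages in vm_pages.items():
        for i, page in enumerate(pages):
            h = page_digest(page)
            if h in seen:
                references += 1   # already transferred once, send only a pointer
            else:
                seen[h] = (vm_id, i)
                transfers += 1    # first copy goes over the core link
    return transfers, references

zero = b"\x00" * 4096
print(dedup_pages({"vm1": [zero, b"a" * 4096], "vm2": [zero, zero]}))  # (2, 2)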
Elsayed EA (2012), "Reliability Engineering" John Wiley & Sons.
Abstract: A newly revised and updated edition that details both the theoretical foundations and practical applications of reliability engineering Reliability is one of the most important quality characteristics of components, products, and large and complex systems—but it takes a significant amount of time and resources to bring reliability to fruition. Thoroughly classroom- and industry-tested, this book helps ensure that engineers see reliability success with every product they design, test, and manufacture. Divided into three parts, Reliability Engineering, Second Edition handily describes the theories and their practical uses while presenting readers with real-world examples and problems to solve. Part I focuses on system reliability estimation for time independent and failure dependent models, helping engineers create a reliable design. Part II aids the reader in assembling necessary components and configuring them to achieve desired reliability objectives, conducting reliability tests on components, and using field data from similar components. Part III follows what happens once a product is produced and sold, how the manufacturer must ensure its reliability objectives by providing preventive and scheduled maintenance and warranty policies. This Second Edition includes in-depth and enhanced chapter coverage of: Reliability and Hazard Functions System Reliability Evaluation Time- and Failure-Dependent Reliability Estimation Methods of the Parameters of Failure-Time Distributions Parametric Reliability Models Models for Accelerated Life Testing Renewal Processes and Expected Number of Failures Preventive Maintenance and Inspection Warranty Models Case Studies A comprehensive reference for practitioners and professionals in quality and reliability engineering, Reliability Engineering can also be used for senior undergraduate or graduate courses in industrial and systems, mechanical, and electrical engineering programs.
BibTeX:
@book{elsayed-reliability:2012,
  author = {Elsayed, Elsayed A.},
  title = {Reliability Engineering},
  publisher = {John Wiley & Sons},
  year = {2012}
}
Emerson Network Power (2012), "Energy Logic 2.0: New Strategies for Cutting Data Center Energy Costs and Boosting Capacity". Thesis at: Emerson Network Power. (03947-2012), pp. 39.
Abstract: A number of associations, consultants and vendors have promoted best practices for enhancing data center energy efficiency. These practices cover everything from facility lighting to cooling system design, and have proven useful in helping some companies slow or reverse the trend of rising data center energy consumption. However, most organizations still lack a cohesive, holistic approach for reducing data center energy use.
BibTeX:
@techreport{emerson-energy:2012,
  author = {Emerson Network Power},
  title = {Energy Logic 2.0: New Strategies for Cutting Data Center Energy Costs and Boosting Capacity},
  school = {Emerson Network Power},
  year = {2012},
  number = {03947-2012},
  pages = {39}
}
Fiedler T and Mircea PM (2012), "Energy Management Systems According to the ISO 50001 Standard: Challenges and Benefits", In 2012 International Conference on Applied and Theoretical Electricity (ICATE). , pp. 1-4. IEEE.
Abstract: The transition to renewable energy sources and energy efficiency have become a central topic, also for the producing industry in Romania and all over Europe. Saving energy is on the agenda for companies as well as facilities and public institutions. Energy efficiency in companies can be controlled and systematized in an energy management system. The ISO standard 50001:2011 enables companies and other institutions to achieve a sustainable energy reduction by systematic energy controlling, documentation and raising the awareness of all personnel involved. This paper presents the challenges and benefits of an ISO 50001 implementation in an industrial environment as well as the methodology and systematic approach, but also tools such as energy controlling systems and measurement equipment which are helpful to achieve energetic transparency.
BibTeX:
@inproceedings{fiedler-energy:2012,
  author = {Fiedler, T. and Mircea, P. M.},
  title = {Energy Management Systems According to the ISO 50001 Standard: Challenges and Benefits},
  booktitle = {2012 International Conference on Applied and Theoretical Electricity (ICATE)},
  publisher = {IEEE},
  year = {2012},
  pages = {1--4},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6403411},
  doi = {10.1109/ICATE.2012.6403411}
}
Gagnaire M, Diaz F, Coti C, Cerin C, Shiozaki K, Xu Y, Delort P, Smets J-P, Le Lous J, Lubiarz S and others (2012), "Downtime statistics of current cloud solutions", International Working Group on Cloud Computing Resiliency, Tech. Rep.
BibTeX:
@article{gagnaire-downtime:2012,
  author = {Gagnaire, Maurice and Diaz, Felipe and Coti, Camille and Cerin, Christophe and Shiozaki, Kazuhiko and Xu, Yingjie and Delort, Pierre and Smets, Jean-Paul and Le Lous, Jonathan and Lubiarz, Stephen and others},
  title = {Downtime statistics of current cloud solutions},
  journal = {International Working Group on Cloud Computing Resiliency, Tech. Rep},
  year = {2012},
  url = {http://iwgcr.org/wp-content/uploads/2012/06/IWGCR-Paris.Ranking-002-en.pdf}
}
Holzer D (2012), "What Does Eco-friendly Mean?".
Abstract: In recent years, terms like ''going green'' and ''eco-friendly'' have become buzz words on talk shows, commercials and product packaging. The term ''eco-friendly'' has been used for ...
BibTeX:
@misc{holzer-eco-friendly:2012,
  author = {Holzer, Daniel},
  title = {What Does Eco-friendly Mean?},
  journal = {Home Guides | SF Gate},
  year = {2012}
}
Kim J, Ruggiero M and Atienza D (2012), "Free cooling-aware dynamic power management for green datacenters", In High Performance Computing and Simulation (HPCS), 2012 International Conference on. , pp. 140-146. IEEE.
BibTeX:
@inproceedings{kim-free:2012,
  author = {Kim, Jungsoo and Ruggiero, Martino and Atienza, David},
  title = {Free cooling-aware dynamic power management for green datacenters},
  booktitle = {High Performance Computing and Simulation (HPCS), 2012 International Conference on},
  publisher = {IEEE},
  year = {2012},
  pages = {140--146},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6266903}
}
Lee EK, Viswanathan H and Pompili D (2012), "VMAP: Proactive Thermal-Aware Virtual Machine Allocation in HPC Cloud Datacenters", In International Conference on High Performance Computing (HiPC), 2012 19th. , pp. 1-10. IEEE.
BibTeX:
@inproceedings{lee-vmap::2012,
  author = {Lee, Eun Kyung and Viswanathan, Hariharasudhan and Pompili, Dario},
  title = {VMAP: Proactive Thermal-Aware Virtual Machine Allocation in HPC Cloud Datacenters},
  booktitle = {International Conference on High Performance Computing (HiPC), 2012 19th},
  publisher = {IEEE},
  year = {2012},
  pages = {1--10},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6507478}
}
Mishra M, Das A, Kulkarni P and Sahoo A (2012), "Dynamic Resource Management Using Virtual Machine Migrations", IEEE Communications Magazine. Vol. 50(9), pp. 34-40.
BibTeX:
@article{mishra-dynamic:2012,
  author = {Mishra, Mayank and Das, Anwesha and Kulkarni, Purushottam and Sahoo, Anirudha},
  title = {Dynamic Resource Management Using Virtual Machine Migrations},
  journal = {IEEE Communications Magazine},
  year = {2012},
  volume = {50},
  number = {9},
  pages = {34--40},
  url = {http://ieeexplore.ieee.org/document/6295709/},
  doi = {10.1109/MCOM.2012.6295709}
}
Nuaimi KA, Mohamed N, Nuaimi MA and Al-Jaroodi J (2012), "A Survey of Load Balancing in Cloud Computing: Challenges and Algorithms", In 2012 Second Symposium on Network Cloud Computing and Applications (NCCA). , pp. 137-142.
Abstract: Load balancing is essential for efficient operations in distributed environments. As cloud computing is growing rapidly and clients are demanding more services and better results, load balancing for the cloud has become a very interesting and important research area. Many algorithms were suggested to provide efficient mechanisms and algorithms for assigning the client's requests to available cloud nodes. These approaches aim to enhance the overall performance of the cloud and provide the user more satisfying and efficient services. In this paper, we investigate the different algorithms proposed to resolve the issue of load balancing and task scheduling in cloud computing. We discuss and compare these algorithms to provide an overview of the latest approaches in the field.
BibTeX:
@inproceedings{nuaimi-survey:2012,
  author = {Nuaimi, K. A. and Mohamed, N. and Nuaimi, M. A. and Al-Jaroodi, J.},
  title = {A Survey of Load Balancing in Cloud Computing: Challenges and Algorithms},
  booktitle = {2012 Second Symposium on Network Cloud Computing and Applications (NCCA)},
  year = {2012},
  pages = {137--142},
  doi = {10.1109/NCCA.2012.29}
}
Petrovic D and Schiper A (2012), "Implementing Virtual Machine Replication: A Case Study Using Xen and KVM", In Proceedings of the 2012 IEEE 26th International Conference on Advanced Information Networking and Applications. Washington, DC, USA , pp. 73-80. IEEE Computer Society.
Abstract: Virtual machine (VM) replication has been recognized as an inexpensive way of providing high availability on commodity hardware. Unfortunately, its impact on system performance is far from negligible and strategies have been proposed to mitigate this problem. In this paper we take a look at VM replication from a different perspective: the choice of a hypervisor. Namely, the differences between hypervisors in terms of architecture and performance are well known and studied in the literature, but no analysis has been performed so far in the context of replication. Taking open-source hypervisors Xen and KVM as examples, we show what hypervisor services are necessary to implement a primary-backup replication scheme and how hypervisor design affects the development steps and the performance. Interestingly, our user space implementation on top of KVM achieves roughly the same performance as an already existing, more mature Xen implementation, which leads us to the conclusion that the inherent cost of the replication scheme dominates the differences between the chosen hypervisors.
BibTeX:
@inproceedings{petrovic-implementing:2012,
  author = {Petrovic, Darko and Schiper, Andre},
  title = {Implementing Virtual Machine Replication: A Case Study Using Xen and KVM},
  booktitle = {Proceedings of the 2012 IEEE 26th International Conference on Advanced Information Networking and Applications},
  publisher = {IEEE Computer Society},
  year = {2012},
  pages = {73--80},
  url = {http://dx.doi.org/10.1109/AINA.2012.50},
  doi = {10.1109/AINA.2012.50}
}
Reviriego P, Sivaraman V, Zhao Z, Maestro JA, Vishwanath A, Sánchez-Macian A and Russell C (2012), "An Energy Consumption Model for Energy Efficient Ethernet Switches", In International Conference on High Performance Computing and Simulation (HPCS), 2012. , pp. 98-104. IEEE.
BibTeX:
@inproceedings{reviriego-energy:2012,
  author = {Reviriego, Pedro and Sivaraman, Vijay and Zhao, Zhi and Maestro, Juan Antonio and Vishwanath, Arun and Sánchez-Macian, Alfonso and Russell, Craig},
  title = {An Energy Consumption Model for Energy Efficient Ethernet Switches},
  booktitle = {International Conference on High Performance Computing and Simulation (HPCS), 2012},
  publisher = {IEEE},
  year = {2012},
  pages = {98--104},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6266897}
}
Rodero I, Viswanathan H, Lee EK, Gamell M, Pompili D and Parashar M (2012), "Energy-Efficient Thermal-Aware Autonomic Management of Virtualized HPC Cloud Infrastructure", J Grid Computing. Vol. 10(3), pp. 447-473.
Abstract: Virtualized datacenters and clouds are being increasingly considered for traditional high-performance computing (HPC) workloads that have typically targeted grids and conventional HPC platforms. However, maximizing energy efficiency and utilization of datacenter resources, and minimizing undesired thermal behavior while ensuring application performance and other quality of service (QoS) guarantees for HPC applications requires careful consideration of important and extremely challenging tradeoffs. Virtual machine (VM) migration is one of the most common techniques used to alleviate thermal anomalies (i.e., hotspots) in cloud datacenter servers as it reduces load and, hence, the server utilization. In this article, the benefits of using other techniques such as voltage scaling and pinning (traditionally used for reducing energy consumption) for thermal management over VM migrations are studied in detail. As no single technique is the most efficient to meet temperature/performance optimization goals in all situations, an autonomic approach that performs energy-efficient thermal management while ensuring the QoS delivered to the users is proposed. To address the problem of VM allocation that arises during VM migrations, an innovative application-centric energy-aware strategy for virtual machine (VM) allocation is proposed. The proposed strategy ensures high resource utilization and energy efficiency through VM consolidation while satisfying application QoS by exploiting knowledge obtained through application profiling along multiple dimensions (CPU, memory, and network bandwidth utilization). To support our arguments, we present the results obtained from an experimental evaluation on real hardware using HPC workloads under different scenarios.
BibTeX:
@article{rodero-energy-efficient:2012,
  author = {Rodero, Ivan and Viswanathan, Hariharasudhan and Lee, Eun Kyung and Gamell, Marc and Pompili, Dario and Parashar, Manish},
  title = {Energy-Efficient Thermal-Aware Autonomic Management of Virtualized HPC Cloud Infrastructure},
  journal = {J Grid Computing},
  year = {2012},
  volume = {10},
  number = {3},
  pages = {447--473},
  doi = {10.1007/s10723-012-9219-2}
}
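Note (illustration, not from the cited article): the abstract above describes placement driven by application profiles along CPU, memory and network dimensions. The Python sketch below shows one way such a multi-dimensional placement score could look; the scoring rule, host capacities and VM profile are hypothetical and are not the strategy proposed by the authors.

# Toy multi-dimensional VM placement; capacities and profiles are made up.
def residual(host, vm):
    """Remaining normalized capacity per dimension after placing vm on host."""
    return {d: host[d] - vm[d] for d in ("cpu", "mem", "net")}

def feasible(host, vm):
    return all(v >= 0 for v in residual(host, vm).values())

def score(host, vm):
    # Keep hosts balanced: maximize the scarcest remaining dimension.
    return min(residual(host, vm).values())

def place(vm, hosts):
    candidates = [h for h in hosts if feasible(h, vm)]
    return max(candidates, key=lambda h: score(h, vm)) if candidates else None

hosts = [{"cpu": 0.4, "mem": 0.5, "net": 0.7}, {"cpu": 0.8, "mem": 0.4, "net": 0.6}]
vm = {"cpu": 0.3, "mem": 0.2, "net": 0.1}
print(place(vm, hosts))   # the second host keeps the most balanced residual capacity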
Singh S (2011), "Handbook of Mechanical Engineering", December, 2011. S. Chand.
Abstract: Handbook of Mechanical Engineering is a comprehensive text for the students of B.E./B.Tech. and the candidates preparing for various competitive examinations like IES/IFS/GATE, state services and competitive tests conducted by public and private sector organizations for selecting apprentice engineers.
BibTeX:
@book{singh-hand:2011,
  author = {Singh, Sadhu},
  title = {Handbook of Mechanical Engineering},
  publisher = {S. Chand},
  year = {2011}
}
Sujatha C and Abimannan S (2011), "Energy Efficient Free Cooling System for Data Centers", In 2011 IEEE Third International Conference on Cloud Computing Technology and Science (CloudCom)., November, 2011. , pp. 646-651.
Abstract: A data center is a facility used to keep computer related equipment. It is estimated that the heat production rate of the data center is doubled every two years and hence the inevitability of the cooling system gets increased. In due course the power consumption of a data center is augmented and more cost is spent on the power usage of the cooling system rather than the equipment purchase. As a result, power savings for the cooling system is strongly desired. In this paper we conferred two primary free cooling systems, namely the air economizer and the water economizer. A free cooling economizer system uses the outside air which is forced to the data center when the outside climate is suitable to meet ASHRAE's cooling requirements. We have also conducted a survey and simulation based estimation using the TRACE(TM) chiller plant analyzer tool. In this study, the energy consumption in a data center using a conventional cooling system is compared with the air economizer and water economizer for three different zones, namely Chicago, Atlanta and Phoenix, in view of the fact that the outside air is relatively cool most of the year. From the projected result it is observed that both economizers reduce energy and cost when compared with the conventional system and the usage of an economizer permits the chiller to shut down or reduce chiller energy load under suitable weather conditions. The results show that water economizers consistently outperform the air economizer, which provides significant improvement in cooling system efficiency and cost at the data center. The performance ratios of the conventional, air economizer and water economizer systems are 50%, 76% and 79% respectively, which shows economizers provide more savings relative to the conventional system.
BibTeX:
@inproceedings{sujatha-energy:2011,
  author = {Sujatha, C. And Abimannan, S.},
  title = {Energy Efficient Free Cooling System for Data Centers},
  booktitle = {2011 IEEE Third International Conference on Cloud Computing Technology and Science (CloudCom)},
  year = {2011},
  pages = {646--651},
  doi = {10.1109/CloudCom.2011.100}
}
Guardigli M (2011), "Tombox: An Arduino Based Solution for Environmental Monitoring of Datacenter Racks". November, 2011.
BibTeX:
@misc{tombox:2011,
  author = {Guardigli, Marco},
  title = {Tombox: An Arduino Based Solution for Environmental Monitoring of Datacenter Racks},
  year = {2011},
  url = {http://marco.guardigli.it/2010/05/arduino-in-datacenter-rack.html}
}
Rodriguez M, Ortiz Uriarte L, Jia Y, Yoshii K, Ross R and Beckman P (2011), "Wireless Sensor Network for Datacenter Environmental Monitoring", In 2011 5th ICST., November, 2011. , pp. 533-537.
Abstract: Data centers' energy consumption has attracted global attention because of the fast growth of the information technology (IT) industry. Up to 60% of the energy consumed in a data center is used for cooling in wasteful ways as a result of lack of environmental information and overcompensated cooling systems. In this project, a wireless sensor network for data-center environmental monitoring was developed to improve energy efficiency and to optimize data-center performance. The sensor network consists of a suite of sensor nodes for data sensing, a router node to relay sensed data, and a coordinator node to establish a network, receive the data, and process the data. The prototype sensor network was built on Arduino open source hardware with a seamlessly integrated XBee RF module and configured to operate within the ZigBee mesh network standard. A 24-hour test run at Argonne's data center demonstrated that the wireless networked environmental monitoring solution is easy to integrate and manage with the existing IT infrastructure, while delivering better visibility into the data center's 3D temperature and humidity distribution and substantial improvements in energy efficiency.
BibTeX:
@inproceedings{wireless:2011,
  author = {Rodriguez, M.G. And Ortiz Uriarte, L.E. And Jia, Yi And Yoshii, K. And Ross, R. And Beckman, P.H.},
  title = {Wireless Sensor Network for Datacenter Environmental Monitoring},
  booktitle = {2011 5th ICST},
  year = {2011},
  pages = {533--537},
  doi = {10.1109/ICSensT.2011.6137036}
}
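Note (illustration, not from the cited paper): a coordinator node like the one described above typically streams readings to a host machine. The Python sketch below reads such a stream with pyserial; the serial port name, baud rate and the "node,temp_C,humidity" line format are assumptions made purely for illustration.

# Hypothetical collector for an XBee/ZigBee coordinator attached over USB serial.
# Port name, baud rate and line format are assumptions, not details from the paper.
import serial   # pyserial

with serial.Serial("/dev/ttyUSB0", 9600, timeout=5) as port:
    for _ in range(10):                        # read a handful of samples
        line = port.readline().decode("ascii", errors="replace").strip()
        if not line:
            continue
        node, temp_c, humidity = line.split(",")
        print(f"node {node}: {float(temp_c):.1f} degC, {float(humidity):.1f}% RH")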
Yang CT, Chou WL, Hsu CH and Cuzzocrea A (2011), "On Improvement of Cloud Virtual Machine Availability with Virtualization Fault Tolerance Mechanism", In 2011 IEEE Third International Conference on Cloud Computing Technology and Science., November, 2011. , pp. 122-129.
Abstract: Virtualization is a common strategy to improve the existing computing resources, particularly in the cloud computing field. Hadoop, one of the Apache projects, is designed to scale up from single servers to thousands of machines, each offering local computation and storage. However, how to guarantee stability and reliability has become a great study topic. In this article, we use current open-source software and platforms to reach our goal, for instance the Xen hypervisor virtualization technology, the OpenNebula virtual machine management tool, and so on. After extending component capabilities, we developed a mechanism to support our idea and reached Hadoop high availability, which we called Virtualization Fault Tolerance (VFT). We consider a practical problem that occurs frequently in our system, and the results in this paper also confirm that the downtime can be shortened if a failure occurs. In this case, it is not only for Hadoop applications, but can also be extended to more areas of cluster-based systems.
BibTeX:
@inproceedings{yang-improvement:2011,
  author = {Yang, C. T. and Chou, W. L. and Hsu, C. H. and Cuzzocrea, A.},
  title = {On Improvement of Cloud Virtual Machine Availability with Virtualization Fault Tolerance Mechanism},
  booktitle = {2011 IEEE Third International Conference on Cloud Computing Technology and Science},
  year = {2011},
  pages = {122--129},
  doi = {10.1109/CloudCom.2011.26}
}
Ferreto TC, Netto MAS, Calheiros RN and De Rose CAF (2011), "Server Consolidation with Migration Control for Virtualized Data Centers", Future Gener. Comput. Syst.., October, 2011. Vol. 27(8), pp. 1027-1034.
Abstract: Virtualization has become a key technology for simplifying service management and reducing energy costs in data centers. One of the challenges faced by data centers is to decide when, how, and which virtual machines (VMs) have to be consolidated into a single physical server. Server consolidation involves VM migration, which has a direct impact on service response time. Most of the existing solutions for server consolidation rely on eager migrations, which try to minimize the number of physical servers running VMs. These solutions generate unnecessary migrations due to unpredictable workloads that require VM resizing. This paper proposes an LP formulation and heuristics to control VM migration, which prioritize virtual machines with steady capacity. We performed experiments using TU-Berlin and Google data center workloads to compare our migration control strategy against existing eager-migration-based solutions. We observed that avoiding migration of VMs with steady capacity reduces the number of migrations with minimal penalty in the number of physical servers.
BibTeX:
@article{ferreto-server:2011,
  author = {Ferreto, Tiago C. and Netto, Marco A. S. and Calheiros, Rodrigo N. and De Rose, César A. F.},
  title = {Server Consolidation with Migration Control for Virtualized Data Centers},
  journal = {Future Gener. Comput. Syst.},
  year = {2011},
  volume = {27},
  number = {8},
  pages = {1027--1034},
  url = {http://dx.doi.org/10.1016/j.future.2011.04.016},
  doi = {10.1016/j.future.2011.04.016}
}
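Note (illustration, not from the cited paper): the abstract above describes consolidation that avoids migrating VMs with steady capacity. A minimal first-fit-decreasing sketch in that spirit is given below; it is a toy, not the LP formulation or heuristics of the paper, and all demands and capacities are hypothetical.

# Toy consolidation: steady VMs keep their hosts; only variable VMs are repacked.
def consolidate(vms, capacity=1.0):
    """vms: list of dicts {id, demand, steady, host}. Returns VM id -> host index."""
    hosts = {}                                  # host index -> used capacity
    for vm in vms:
        if vm["steady"]:                        # steady VMs are never migrated
            hosts[vm["host"]] = hosts.get(vm["host"], 0.0) + vm["demand"]
    movable = sorted((v for v in vms if not v["steady"]),
                     key=lambda v: v["demand"], reverse=True)
    assignment = {v["id"]: v["host"] for v in vms if v["steady"]}
    for vm in movable:                          # first-fit-decreasing onto open hosts
        target = next((h for h, used in sorted(hosts.items())
                       if used + vm["demand"] <= capacity), None)
        if target is None:
            target = max(hosts, default=-1) + 1  # open a new host
        hosts[target] = hosts.get(target, 0.0) + vm["demand"]
        assignment[vm["id"]] = target
    return assignment

vms = [{"id": "a", "demand": 0.5, "steady": True,  "host": 0},
       {"id": "b", "demand": 0.3, "steady": False, "host": 1},
       {"id": "c", "demand": 0.4, "steady": False, "host": 2}]
print(consolidate(vms))   # b and c are packed next to a where capacity allows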
McIntosh S, Kephart J, Lenchner J, Feridun M, Nidd M, Tanner A, Yang B and Barabasi I (2011), "Semi-automated Data Center Hotspot Diagnosis", In 2011 7th International Conference on Network and Service Management (CNSM)., October, 2011. , pp. 1-7.
Abstract: An increasingly important requirement for energy-efficient data center operation is to diagnose and fix thermal anomalies that sometimes occur due to excessive workload or equipment failures. Today, the task of diagnosing thermal anomalies entails expert but tedious analysis of data collected manually from disparate management systems. Our ultimate goal is to substantially reduce the time, tedium and expertise required to diagnose thermal hotspots by developing a system that generates accurate diagnoses automatically. We describe a substantial step towards this goal: a loosely-coupled, semi-automated thermal diagnosis system that integrates IT and facilities data, uses simple heuristics to highlight the most likely culprits, and provides a graphical interface that enables an administrator to narrow the list further by exploring data correlations. Among the challenges addressed by our solution are coping with heterogeneous data types and data access methods, and detecting and managing erroneous sensor readings.
BibTeX:
@inproceedings{mcintosh-semi-automated:2011,
  author = {McIntosh, S. and Kephart, J. O. and Lenchner, J. and Feridun, M. and Nidd, M. and Tanner, A. and Yang, B. and Barabasi, I.},
  title = {Semi-automated Data Center Hotspot Diagnosis},
  booktitle = {2011 7th International Conference on Network and Service Management (CNSM)},
  year = {2011},
  pages = {1--7}
}
Wang L and Khan SU (2011), "Review of Performance Metrics for Green DC: A Taxonomy Study", The Journal of Supercomputing., October, 2011. , pp. 639-656.
Abstract: Data centers now play an important role in modern IT infrastructures. Although much research effort has been made in the field of green data center computing, performance metrics for green data centers have been left ignored. This paper is devoted to categorization of green computing performance metrics in data centers, such as basic metrics like power metrics, thermal metrics and extended performance metrics, i.e. multiple data center indicators. Based on a taxonomy of performance metrics, this paper summarizes features of currently available metrics and presents insights for the study on green data center computing.
BibTeX:
@article{wang-review:2011,
  author = {Wang, Lizhe and Khan, Samee U.},
  title = {Review of Performance Metrics for Green DC: A Taxonomy Study},
  journal = {The Journal of Supercomputing},
  year = {2011},
  pages = {639--656},
  doi = {10.1007/s11227-011-0704-3}
}
Tholeti BP (2011), "Hypervisors, virtualization, and the cloud: Learn about hypervisors, system virtualization, and how it works in a cloud environment". Thesis at: IBM., September, 2011. , pp. 7.
Abstract: Read about hypervisor types and system virtualization in the first article of this series. This series starts with a background on hypervisor types, system virtualization, and then offers a look at the features of five hypervisors, their deployment processes, and the management issues you might encounter.
BibTeX:
@techreport{tholeti-hypervisors:2011,
  author = {Tholeti, Bhanu P},
  title = {Hypervisors, virtualization, and the cloud: Learn about hypervisors, system virtualization, and how it works in a cloud environment},
  school = {IBM},
  year = {2011},
  pages = {7},
  url = {http://www.ibm.com/developerworks/cloud/library/cl-hypervisorcompare/index.html}
}
Bolla R, Davoli F, Bruschi R, Christensen K, Cucchietti F and Singh S (2011), "The Potential Impact of Green Technologies in Next-Generation Wireline Networks: Is There Room for Energy Saving Optimization?", IEEE Communications Magazine., August, 2011. Vol. 49(8), pp. 80-86.
Abstract: Recently, network operators around the world reported statistics of network energy requirements and the related carbon footprint, showing an alarming and growing trend. Such high energy consumption can be mainly ascribed to networking equipment designed to work at maximum capacity with high and almost constant dissipation, independent of the traffic load. However, recent developments of green network technologies suggest the chance to build future devices capable of adapting their performance and energy absorption to meet actual workload and operational requirements. In such a scenario, this contribution aims at evaluating the potential impact on next-generation wireline networks of green technologies in economic and environmental terms. We based our impact analysis on the real network energy-efficiency targets of an ongoing European project, and applied them to the expected deployment of Telecom Italia infrastructure by 2015-2020.
BibTeX:
@article{bolla-potential:2011,
  author = {Bolla, R. and Davoli, Franco and Bruschi, R. and Christensen, K. and Cucchietti, F. and Singh, S.},
  title = {The Potential Impact of Green Technologies in Next-Generation Wireline Networks: Is There Room for Energy Saving Optimization?},
  journal = {IEEE Communications Magazine},
  year = {2011},
  volume = {49},
  number = {8},
  pages = {80--86},
  doi = {10.1109/MCOM.2011.5978419}
}
Kusnetzky D (2011), "Virtualization: A Manager's Guide: Big Picture of the Who, What, and Where of Virtualization" Beijing ; Sebastopol, July, 2011. O'Reilly Media.
Abstract: What exactly is virtualization? As this concise book explains, virtualization is a smorgasbord of technologies that offer organizations many advantages, whether you're managing extremely large stores of rapidly changing data, scaling out an application, or harnessing huge amounts of computational power. With this guide, you get an overview of the five main types of virtualization technology, along with information on security, management, and modern use cases. Topics include: access virtualization, which allows access to any application from any device; application virtualization, which enables applications to run on many different operating systems and hardware platforms; processing virtualization, which makes one system seem like many, or many seem like one; network virtualization, which presents an artificial view of the network that differs from the physical reality; and storage virtualization, which allows many systems to share the same storage devices, enables concealing the location of storage systems, and more.
BibTeX:
@book{kusnetzky-virtualization:2011,
  author = {Kusnetzky, Dan},
  title = {Virtualization: A Manager's Guide: Big Picture of the Who, What, and Where of Virtualization},
  publisher = {O'Reilly Media},
  year = {2011},
  edition = {1 edition}
}
Bin E, Biran O, Boni O, Hadad E, Kolodner EK, Moatti Y and Lorenz DH (2011), "Guaranteeing High Availability Goals for Virtual Machine Placement", In 2011 31st International Conference on Distributed Computing Systems., June, 2011. , pp. 700-709.
Abstract: The placement of virtual machines (VMs) on a cluster of hosts under multiple constraints, including administrative (security, regulations), resource-oriented (capacity, energy), and QoS-oriented (performance), is a highly complex task. We define a new high-availability property for a VM; when a VM is marked as k-resilient, as long as there are up to k host failures, it should be guaranteed that it can be relocated to a non-failed host without relocating other VMs. Together with Hardware Predictive Failure Analysis and live migration, which enable VMs to be evacuated from a host before it fails, this property allows the continuous running of VMs on the cluster despite host failures. The complexity of the constraints associated with k-resiliency, which are naturally expressed by Second Order logic statements, prevented their integration into the placement computation until now. We present a novel algorithm which enables this integration by transforming the k-resiliency constraints to rules consumable by a generic Constraint Programming engine, prove that it guarantees the required resiliency and describe the implementation. We provide some preliminary results and compare our high availability support with naive solutions.
BibTeX:
@inproceedings{bin-guaranteeing:2011,
  author = {Bin, E. and Biran, O. and Boni, O. and Hadad, E. and Kolodner, E. K. and Moatti, Y. and Lorenz, D. H.},
  title = {Guaranteeing High Availability Goals for Virtual Machine Placement},
  booktitle = {2011 31st International Conference on Distributed Computing Systems},
  year = {2011},
  pages = {700--709},
  doi = {10.1109/ICDCS.2011.72}
}
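Note (illustration, not from the cited paper): the k-resiliency property above can be pictured with a toy check for k = 1, which verifies that the VMs of any single failed host could be re-placed on the surviving hosts without moving other VMs. The paper itself encodes these constraints for a generic Constraint Programming engine; the first-fit check and the numbers below are only a hypothetical sketch.

# Toy 1-resiliency check: any single host may fail and its VMs must still fit
# elsewhere without relocating other VMs. Capacities and placements are made up.
def one_resilient(placement, capacity):
    """placement: host -> list of VM demands; capacity: host -> total capacity."""
    used = {h: sum(vms) for h, vms in placement.items()}
    for failed in placement:
        spare = {h: capacity[h] - used[h] for h in placement if h != failed}
        for demand in sorted(placement[failed], reverse=True):
            target = next((h for h in spare if spare[h] >= demand), None)
            if target is None:
                return False
            spare[target] -= demand
    return True

placement = {"h1": [0.4, 0.3], "h2": [0.5], "h3": [0.2]}
capacity = {"h1": 1.0, "h2": 1.0, "h3": 1.0}
print(one_resilient(placement, capacity))   # True if every single failure is survivable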
Harnett C (2011), "Open Source Hardware for Instrumentation and Measurement", IEEE Instrumentation Measurement Magazine., June, 2011. Vol. 14(3), pp. 34-38.
Abstract: The term "open source" originally applied to software projects with publicly available source code for others to modify, improve, and compile. Modified software projects were then often required to release their source code under the terms of the "open source" agreement. Currently, "open source" is also available for hardware projects and includes printed circuit board designs, photomask layouts and mechanical assemblies. While the scientific community requests journal authors to provide enough information for other groups to replicate their work, and patent examiners hold inventors to the same test, the OSH community requires even more details and prefers that they be available online. For example, downloadable electronic design files for printed circuit boards and 3-D printable enclosures make it possible for an engineer to modify an instrument design from the desktop, order the parts from several different manufacturers, and receive a customized kit for assembly - or even have the parts assembled and shipped. The original designers may remain completely unaware of the development or may receive credit or a royalty for their work depending on the terms under which they released the design.
BibTeX:
@article{harnett-open:2011,
  author = {Harnett, C.},
  title = {Open Source Hardware for Instrumentation and Measurement},
  journal = {IEEE Instrumentation Measurement Magazine},
  year = {2011},
  volume = {14},
  number = {3},
  pages = {34--38},
  doi = {10.1109/MIM.2011.5773535}
}
Kipp A, Jiang T and Fugini M (2011), "Green Metrics for Energy-Aware IT Systems", In 2011 International Conference on Complex, Intelligent and Software Intensive Systems (CISIS)., June, 2011. , pp. 241-248.
Abstract: This paper presents a novel approach to characterise applications with respect to their energy consumption by using a set of energy-related metrics, called green metrics. These indicators are based on energy consumption measurements, such as indexes of computing resource usage, of environmental impact, and even of development costs required to (re)design an application in order to optimise its energy consumption footprint, or of organizational factors related to application management. Our approach is framed in the GAMES (Green Active Management of Energy in IT Service Centres) EU project about green IT. In this paper, we define four clusters of green metrics enabling to feature an application in terms of the energy it consumes at run time. Such metrics are the basis for measuring the "greenness" of an application and to detect where it consumes and wastes energy. Hints are provided to improve applications design and execution. We show within an application scenario how monitoring and evaluation of the green metrics helps to improve energy efficiency.
BibTeX:
@inproceedings{kipp-green:2011,
  author = {Kipp, A. and Jiang, Tao and Fugini, M.},
  title = {Green Metrics for Energy-Aware IT Systems},
  booktitle = {2011 International Conference on Complex, Intelligent and Software Intensive Systems (CISIS)},
  year = {2011},
  pages = {241--248},
  doi = {10.1109/CISIS.2011.42}
}
Breitgand D and Epstein A (2011), "SLA-aware placement of multi-virtual machine elastic services in compute clouds", May, 2011. , pp. 161-168. IEEE.
BibTeX:
@inproceedings{breitgand-sla-aware:2011,
  author = {Breitgand, David and Epstein, Amir},
  title = {SLA-aware placement of multi-virtual machine elastic services in compute clouds},
  publisher = {IEEE},
  year = {2011},
  pages = {161--168},
  url = {http://ieeexplore.ieee.org/document/5990687/},
  doi = {10.1109/INM.2011.5990687}
}
Coulouris G, Dollimore J, Kindberg T and Blair G (2011), "Distributed Systems: Concepts and Design" Boston, May, 2011. Pearson.
Abstract: Broad and up-to-date coverage of the principles and practice in the fast moving area of Distributed Systems. Distributed Systems provides students of computer science and engineering with the skills they will need to design and maintain software for distributed applications. It will also be invaluable to software engineers and systems designers wishing to understand new and future developments in the field. From mobile phones to the Internet, our lives depend increasingly on distributed systems linking computers and other devices together in a seamless and transparent way. The fifth edition of this best-selling text continues to provide a comprehensive source of material on the principles and practice of distributed computer systems and the exciting new developments based on them, using a wealth of modern case studies to illustrate their design and development. The depth of coverage will enable readers to evaluate existing distributed systems and design new ones.
BibTeX:
@book{coulouris-distributed:2011,
  author = {Coulouris, George and Dollimore, Jean and Kindberg, Tim and Blair, Gordon},
  title = {Distributed Systems: Concepts and Design},
  publisher = {Pearson},
  year = {2011},
  edition = {5 edition}
}
Mansley C, Connell J, Isci C, Lenchner J, Kephart J, McIntosh S and Schappert M (2011), "Robotic Mapping and Monitoring of Data Centers", In 2011 IEEE International Conference on Robotics and Automation (ICRA)., May, 2011. , pp. 5905-5910.
Abstract: We describe an inexpensive autonomous robot capable of navigating previously unseen data centers and monitoring key metrics such as air temperature. The robot provides real-time navigation and sensor data to commercial IBM software, thereby enabling real-time generation of the data center layout, a thermal map and other visualizations of energy dynamics. Once it has mapped a data center, the robot can efficiently monitor it for hot spots and other anomalies using intelligent sampling. We demonstrate the robot's effectiveness via experimental studies from two production data centers.
BibTeX:
@inproceedings{mansley-robotic:2011,
  author = {Mansley, C. and Connell, J. and Isci, C. and Lenchner, J. and Kephart, J. O. and McIntosh, S. and Schappert, M.},
  title = {Robotic Mapping and Monitoring of Data Centers},
  booktitle = {2011 IEEE International Conference on Robotics and Automation (ICRA)},
  year = {2011},
  pages = {5905--5910},
  doi = {10.1109/ICRA.2011.5980554}
}
Sedaghat M, Hernández F and Elmroth E (2011), "Unifying Cloud Management: Towards Overall Governance of Business Level Objectives", In 2011 11th IEEE/ACM International Symposium on Cluster, Cloud and Grid Computing., May, 2011. , pp. 591-597.
Abstract: We address the challenge of providing unified cloud resource management towards an overall business level objective, given the multitude of managerial tasks to be performed and the complexity of any architecture to support them. Resource level management tasks include elasticity control, virtual machine and data placement, autonomous fault management, etc., which are intrinsically difficult problems since services normally have unknown lifetimes and capacity demands that vary largely over time. To unify the management of these problems (for optimization with respect to some higher level business level objective, like optimizing revenue while breaking no more than a certain percentage of service level agreements) becomes even more challenging as the resource level managerial challenges are far from independent. After providing the general problem formulation, we review recent approaches taken by the research community, including mainly general autonomic computing technology for large-scale environments and resource level management tools equipped with some business oriented or otherwise qualitative features. We propose and illustrate a policy-driven approach where a high-level management system monitors overall system and services behavior and adjusts lower level policies (e.g., thresholds for admission control, elasticity control, server consolidation level, etc.) for optimization towards the measurable business level objectives.
BibTeX:
@inproceedings{sedaghat-unifying:2011,
  author = {Sedaghat, M. and Hernández, F. and Elmroth, E.},
  title = {Unifying Cloud Management: Towards Overall Governance of Business Level Objectives},
  booktitle = {2011 11th IEEE/ACM International Symposium on Cluster, Cloud and Grid Computing},
  year = {2011},
  pages = {591--597},
  doi = {10.1109/CCGrid.2011.65}
}
Huang Q, Gao F, Wang R and Qi Z (2011), "Power Consumption of Virtual Machine Live Migration in Clouds", In 2011 Third International Conference on Communications and Mobile Computing., April, 2011. , pp. 122-125. IEEE.
Abstract: Virtualization technology has been employed increasingly widely in modern data centers in order to improve their energy efficiency. In particular, the capability of virtual machine (VM) migration brings multiple benefits, such as resource (CPU, memory, etc.) distribution and energy-aware consolidation. However, the migration of virtual machines itself brings extra power consumption. For this reason, a better understanding of its effect on system power consumption is highly desirable. In this paper, we present a power consumption evaluation on the effects of live migration of VMs. Results show that the power overhead of migration is much less in the scenario of employing the strategy of consolidation than in the regular deployment without using consolidation. Our results are based on a typical physical server, the power of which is a linear model of the CPU utilization percentage.
BibTeX:
@inproceedings{huang-power:2011,
  author = {Huang, Q. and Gao, F. and Wang, R. and Qi, Z.},
  title = {Power Consumption of Virtual Machine Live Migration in Clouds},
  booktitle = {2011 Third International Conference on Communications and Mobile Computing},
  publisher = {IEEE},
  year = {2011},
  pages = {122--125},
  url = {http://ieeexplore.ieee.org/document/5931175/},
  doi = {10.1109/CMC.2011.62}
}
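Note (illustration, not from the cited paper): the abstract above refers to a server power model that is linear in CPU utilization. The sketch below writes that standard linear model down in Python; the idle/busy wattages, the extra utilization and the migration duration are purely hypothetical values, not measurements from the paper.

# Linear server power model: P(u) = P_idle + (P_busy - P_idle) * u, 0 <= u <= 1.
# P_IDLE and P_BUSY are hypothetical example values.
P_IDLE, P_BUSY = 160.0, 280.0      # watts

def server_power(utilization):
    return P_IDLE + (P_BUSY - P_IDLE) * utilization

def migration_energy(extra_utilization, duration_s):
    """Rough extra energy (joules) if a live migration adds CPU load for duration_s."""
    return (server_power(extra_utilization) - server_power(0.0)) * duration_s

print(server_power(0.5))            # 220.0 W at 50% CPU
print(migration_energy(0.2, 30))    # extra joules for a 30 s migration at +20% CPU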
Minoli D (2011), "Designing Green Networks and Network Operations: Saving Run-the-Engine Costs", April, 2011. CRC Press.
Abstract: In recent years, socio-political trends toward environmental responsibility and the pressing need to reduce run-the-engine (RTE) costs have resulted in the concept of green IT. Although a significant amount of energy is used to operate routing, switching, and transmission equipment, comparatively less attention has been paid to green networking. A clear and concise introduction to green networks and green network operations, Designing Green Networks and Network Operations: Saving Run-the-Engine Costs guides you through the techniques available to achieve efficiency goals for corporate and carrier networks, including deploying more efficient hardware, blade form-factor routers and switches, and pursuing consolidation, virtualization, and network and cloud computing. The book: delineates techniques to minimize network power, cooling, floor space, and online storage while optimizing service performance, capacity, and availability; discusses virtualization, network computing, and web services as approaches for green data centers and networks; emphasizes best practices and compliance with international standards for green operations; extends the green data center techniques to the networking environment; incorporates green principles in the intranet, extranet, and the entire IT infrastructure; reviews networking, power management, HVAC and CRAC basics; and presents methodical steps toward a seamless migration to green IT and green networking.
BibTeX:
@book{minoli-designing:2011,
  author = {Minoli, Daniel},
  title = {Designing Green Networks and Network Operations: Saving Run-the-Engine Costs},
  publisher = {CRC Press},
  year = {2011}
}
Sung W-T and Hsu Y-C (2011), "Designing an Industrial Real-Time Measurement and Monitoring System Based on Embedded System and ZigBee", Expert Systems with Applications., April, 2011. Vol. 38(4), pp. 4522-4529.
Abstract: With the increasing automation of factories, factory floors are being covered with machinery. Spaces crowded with machinery are more difficult and dangerous for personnel to operate in. This system attempts to use the ZigBee embedded system to improve industrial safety quality. In addition to performing existing typical monitoring functions, this system utilizes ZigBee wireless transmission technology for remote monitoring. The measurement items of the industrial applications of this system platform include length filtering, ground vibration sensing, weight grading, electricity sensing, energy monitoring, temperature monitoring, and carbon dioxide concentration. Our application of ZigBee combined with an embedded system to industrial real-time measurements represents an innovative technology. In addition to discussing the system platform in this study, we also discuss statistics and analysis of measurement data. Performing wired and wireless synchronized measurement and monitoring using this system can achieve correct and efficient industrial monitoring operations.
BibTeX:
@article{sung-designing:2011,
  author = {Sung, Wen-Tsai and Hsu, Yao-Chi},
  title = {Designing an Industrial Real-Time Measurement and Monitoring System Based on Embedded System and ZigBee},
  journal = {Expert Systems with Applications},
  year = {2011},
  volume = {38},
  number = {4},
  pages = {4522--4529},
  doi = {10.1016/j.eswa.2010.09.126}
}
(2011), "Cloud Computing: Principles and Paradigms" Hoboken, N.J, March, 2011. Wiley.
Abstract: The primary purpose of this book is to capture the state-of-the-art in Cloud Computing technologies and applications. The book will also aim to identify potential research directions and technologies that will facilitate creation a global market-place of cloud computing services supporting scientific, industrial, business, and consumer applications. We expect the book to serve as a reference for larger audience such as systems architects, practitioners, developers, new researchers and graduate level students. This area of research is relatively recent, and as such has no existing reference book that addresses it. This book will be a timely contribution to a field that is gaining considerable research interest, momentum, and is expected to be of increasing interest to commercial developers. The book is targeted for professional computer science developers and graduate students especially at Masters level. As Cloud Computing is recognized as one of the top five emerging technologies that will have a major impact on the quality of science and society over the next 20 years, its knowledge will help position our readers at the forefront of the field.
BibTeX:
@book{buyya-cloud:2011,
  editor = {Buyya, Rajkumar and Broberg, James and Goscinski, Andrzej M.},
  title = {Cloud Computing: Principles and Paradigms},
  publisher = {Wiley},
  year = {2011},
  edition = {1 edition}
}
Khanjani A and Sulaiman R (2011), "The Aspects of Choosing Open Source Versus Closed Source", In 2011 IEEE Symposium on Computers Informatics (ISCI)., March, 2011. , pp. 646-649.
Abstract: Closed source software is a type of software that is licensed under the exclusive legal right of its owner. It is also purchasable by users by paying an amount of money. Open source software (OSS) is software available with its source code under an open source license to study and modify the code. Open source software development (OSSD) is the process to develop OSS. Many industries try using OSSD as they see the advantages of open source compared to closed source software development. This research presents the reasons for recently using the OSSD model rather than the traditional closed source approach. The result is to show the differences between closed source and open source processes and how open source can affect quality through its particular features. It also identifies and addresses the challenges and benefits faced by the users against the traditional closed source model.
BibTeX:
@inproceedings{khanjani-aspects:2011,
  author = {Khanjani, A. and Sulaiman, Riza},
  title = {The Aspects of Choosing Open Source Versus Closed Source},
  booktitle = {2011 IEEE Symposium on Computers Informatics (ISCI)},
  year = {2011},
  pages = {646--649},
  doi = {10.1109/ISCI.2011.5958992}
}
Tange O (2011), "GNU Parallel - The Command-Line Power Tool", ;login: The USENIX Magazine. Frederiksberg, Denmark, Feb, 2011. Vol. 36(1), pp. 42-47.
BibTeX:
@article{Tange2011a,
  author = {O. Tange},
  title = {GNU Parallel - The Command-Line Power Tool},
  journal = {;login: The USENIX Magazine},
  year = {2011},
  volume = {36},
  number = {1},
  pages = {42-47},
  url = {http://www.gnu.org/s/parallel},
  doi = {10.5281/zenodo.16303}
}
Bianzino AP, Raju AK and Rossi D (2011), "Apples-to-Apples: A Framework Analysis for Energy-Efficiency in Networks", SIGMETRICS Perform. Eval. Rev.., January, 2011. Vol. 38(3), pp. 81-85.
Abstract: Research on energy-efficiency of communication networks has already gained the attention of a broad research community. Specifically, we consider efforts towards improving environmental sustainability by making networks energy-aware. An important step in this direction is establishing a comprehensive methodology for measuring and reporting the energy consumption of the network. In this work, we compare and contrast various energy-related metrics used in the recent literature, by means of a taxonomy definition, as well as through relevant case studies. We believe this to be a first necessary step towards the definition of a common framework for the performance evaluation of energy-aware networks.
BibTeX:
@article{bianzino-apples:2011,
  author = {Bianzino, Aruna Prem and Raju, Anand Kishore and Rossi, Dario},
  title = {Apples-to-Apples: A Framework Analysis for Energy-Efficiency in Networks},
  journal = {SIGMETRICS Perform. Eval. Rev.},
  year = {2011},
  volume = {38},
  number = {3},
  pages = {81--85},
  doi = {10.1145/1925019.1925036}
}
Avelar V (2011), "Guidance for Calculation of Efficiency (PUE) in Data Centers", Schneider Electric, Rueil Malmaison, France, White Paper. Vol. 158
BibTeX:
@article{avelar-guidance:2011,
  author = {Avelar, Victor},
  title = {Guidance for Calculation of Efficiency (PUE) in Data Centers},
  journal = {Schneider Electric, Rueil Malmaison, France, White Paper},
  year = {2011},
  volume = {158}
}
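Note (illustration, not from the white paper): PUE, the metric the cited guidance addresses, is the ratio of total facility energy to IT equipment energy. A minimal worked example follows; the kWh figures are invented purely for illustration.

# PUE = total facility energy / IT equipment energy (dimensionless, >= 1.0).
# The kWh figures below are made up for illustration only.
it_energy_kwh = 1_000_000          # servers, storage, network
facility_energy_kwh = 1_700_000    # IT + cooling + power distribution + lighting

pue = facility_energy_kwh / it_energy_kwh
dcie = 1 / pue                     # Data Center infrastructure Efficiency, the inverse
print(f"PUE = {pue:.2f}, DCiE = {dcie:.0%}")   # PUE = 1.70, DCiE = 59%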
Busse DK (2011), "Who Needs Energy Management", In CHI '11 Extended Abstracts on Human Factors in Computing Systems. New York, NY, USA , pp. 1639-1644. ACM.
Abstract: In this work-in-progress report, research into the potential target users for an industrial energy management solution is being discussed with reference to both on-site and remote user interviews conducted in 2010 with energy managers of several US companies in high energy-intensity manufacturing industries.
BibTeX:
@inproceedings{busse-who:2011,
  author = {Busse, Daniela K.},
  title = {Who Needs Energy Management},
  booktitle = {CHI '11 Extended Abstracts on Human Factors in Computing Systems},
  publisher = {ACM},
  year = {2011},
  pages = {1639--1644},
  doi = {10.1145/1979742.1979821}
}
Crago S, Dunn K, Eads P, Hochstein L, Kang DI, Kang M, Modium D, Singh K, Suh J and Walters JP (2011), "Heterogeneous Cloud Computing", In 2011 IEEE International Conference on Cluster Computing. , pp. 378-385.
Abstract: Current cloud computing infrastructure typically assumes a homogeneous collection of commodity hardware, with details about hardware variation intentionally hidden from users. In this paper, we present our approach for extending the traditional notions of cloud computing to provide a cloud-based access model to clusters that contain heterogeneous architectures and accelerators. We describe our ongoing work extending the OpenStack cloud computing stack to support heterogeneous architectures and accelerators, and our experiences running OpenStack on our local heterogeneous cluster testbed.
BibTeX:
@inproceedings{crago-heterogeneous:2011,
  author = {Crago, S. and Dunn, K. and Eads, P. and Hochstein, L. and Kang, D. I. and Kang, M. and Modium, D. and Singh, K. and Suh, J. and Walters, J. P.},
  title = {Heterogeneous Cloud Computing},
  booktitle = {2011 IEEE International Conference on Cluster Computing},
  year = {2011},
  pages = {378--385},
  doi = {10.1109/CLUSTER.2011.49}
}
Embedds (2011), "ADC on ATmega328. Part 1".
Abstract: Microcontrollers are meant to deal with digital information. They only understand '0' and '1' values. So what if we need to get some non-digital data into a microcontroller? The only way is to digit...
BibTeX:
@misc{embedds-adc:2011,
  author = {Embedds},
  title = {ADC on ATmega328. Part 1},
  journal = {Embedded Projects From Around The Web},
  year = {2011}
}
Ganga GMD, Carpinetti LCR and Politano PR (2011), "A Fuzzy Logic Approach to Supply Chain Performance Management", Gestão e Produção. Vol. 18(4), pp. 755-774.
BibTeX:
@article{ganga-fuzzy:2011,
  author = {Ganga, Gilberto Miller Devós and Carpinetti, Luiz Cesar Ribeiro and Politano, Paulo Rogério},
  title = {A Fuzzy Logic Approach to Supply Chain Performance Management},
  journal = {Gestão e Produção},
  year = {2011},
  volume = {18},
  number = {4},
  pages = {755--774},
  doi = {10.1590/S0104-530X2011000400006}
}
Jansen W and Grance T (2011), "SP 800-144. Guidelines on Security and Privacy in Public Cloud Computing". Thesis at: National Institute of Standards & Technology -- NIST. Gaithersburg, MD, United States
Abstract: Cloud computing can and does mean different things to different people. The common characteristics most interpretations share are on-demand scalability of highly available and reliable pooled computing resources, secure access to metered services from nearly anywhere, and displacement of data and services from inside to outside the organization. While aspects of these characteristics have been realized to a certain extent, cloud computing remains a work in progress. This publication provides an overview of the security and privacy challenges pertinent to public cloud computing and points out considerations organizations should take when outsourcing data, applications, and infrastructure to a public cloud environment.
BibTeX:
@techreport{jansen-nist:2011,
  author = {Jansen, Wayne and Grance, Timothy},
  title = {SP 800-144. Guidelines on Security and Privacy in Public Cloud Computing},
  school = {National Institute of Standards & Technology -- NIST},
  year = {2011}
}
Koomey J (2011), "Growth in Data Center Electricity Use 2005 to 2010", A report by Analytical Press, completed at the request of The New York Times. , pp. 9.
BibTeX:
@article{koomey-growth:2011,
  author = {Koomey, Jonathan},
  title = {Growth in Data Center Electricity Use 2005 to 2010},
  journal = {A report by Analytical Press, completed at the request of The New York Times},
  year = {2011},
  pages = {9},
  url = {http://www.missioncriticalmagazine.com/ext/resources/MC/Home/Files/PDFs/Koomey_Data_Center.pdf}
}
Lenchner J, Isci C, Kephart JO, Mansley C, Connell J and McIntosh S (2011), "Towards Data Center Self-Diagnosis Using a Mobile Robot", In Proceedings of the 8th ACM International Conference on Autonomic Computing. New York, NY, USA , pp. 81-90. ACM.
Abstract: We describe an inexpensive robot that serves as a physical autonomic element, capable of navigating, mapping and monitoring data centers with little or no human involvement, even ones that it has never seen before. Through a series of real experiments and simulations, we establish that the robot is sufficiently accurate, efficient and robust to be of practical benefit in real data center environments. We demonstrate how the robot's integration with Maximo for Energy Optimization, a commercial data center energy management product, supports autonomic management at the level of the data center as a whole, particularly self-diagnosis of emerging thermal problems.
BibTeX:
@inproceedings{lenchner-towards:2011,
  author = {Lenchner, Jonathan and Isci, Canturk and Kephart, Jeffrey O. and Mansley, Christopher and Connell, Jonathan and McIntosh, Suzanne},
  title = {Towards Data Center Self-Diagnosis Using a Mobile Robot},
  booktitle = {Proceedings of the 8th ACM International Conference on Autonomic Computing},
  publisher = {ACM},
  year = {2011},
  pages = {81--90},
  doi = {10.1145/1998582.1998597}
}
Marston S, Li Z, Bandyopadhyay S, Zhang J and Ghalsasi A (2011), "Cloud computing — The business perspective", Decision Support Systems. Vol. 51(1), pp. 176-189.
Abstract: The evolution of cloud computing over the past few years is potentially one of the major advances in the history of computing. However, if cloud computing is to achieve its potential, there needs to be a clear understanding of the various issues involved, both from the perspectives of the providers and the consumers of the technology. While a lot of research is currently taking place in the technology itself, there is an equally urgent need for understanding the business-related issues surrounding cloud computing. In this article, we identify the strengths, weaknesses, opportunities and threats for the cloud computing industry. We then identify the various issues that will affect the different stakeholders of cloud computing. We also issue a set of recommendations for the practitioners who will provide and manage this technology. For IS researchers, we outline the different areas of research that need attention so that we are in a position to advice the industry in the years to come. Finally, we outline some of the key issues facing governmental agencies who, due to the unique nature of the technology, will have to become intimately involved in the regulation of cloud computing.
BibTeX:
@article{marston-cloud:2011,
  author = {Marston, Sean and Li, Zhi and Bandyopadhyay, Subhajyoti and Zhang, Juheng and Ghalsasi, Anand},
  title = {Cloud computing — The business perspective},
  journal = {Decision Support Systems},
  year = {2011},
  volume = {51},
  number = {1},
  pages = {176--189},
  url = {http://www.sciencedirect.com/science/article/pii/S0167923610002393},
  doi = {10.1016/j.dss.2010.12.006}
}
Masanet ER, Brown RE, Shehabi A, Koomey JG and Nordman B (2011), "Estimating the Energy Use and Efficiency Potential of U.S. Data Centers", Proceedings of the IEEE. Vol. 99(8), pp. 1440-1453.
Abstract: Data centers are a significant and growing component of electricity demand in the United States. This paper presents a bottom-up model that can be used to estimate total data center electricity demand within a region as well as the potential electricity savings associated with energy efficiency improvements. The model is applied to estimate 2008 U.S. data center electricity demand and the technical potential for electricity savings associated with major measures for IT devices and infrastructure equipment. Results suggest that 2008 demand was approximately 69 billion kilowatt hours (1.8% of 2008 total U.S. electricity sales) and that it may be technically feasible to reduce this demand by up to 80% (to 13 billion kilowatt hours) through aggressive pursuit of energy efficiency measures. Measure-level savings estimates are provided, which shed light on the relative importance of different measures at the national level. Measures applied to servers are found to have the greatest contribution to potential savings.
BibTeX:
@article{masanet-estimating:2011,
  author = {Masanet, E. R. and Brown, R. E. and Shehabi, A. and Koomey, J. G. and Nordman, B.},
  title = {Estimating the Energy Use and Efficiency Potential of U.S. Data Centers},
  journal = {Proceedings of the IEEE},
  year = {2011},
  volume = {99},
  number = {8},
  pages = {1440--1453},
  doi = {10.1109/JPROC.2011.2155610}
}
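Note (illustration only): a quick arithmetic check of the headline figures quoted in the abstract above (69 billion kWh, 1.8% of total U.S. electricity sales, and an up-to-80% technical reduction to roughly 13 billion kWh):

# Arithmetic check of the abstract's headline numbers (no new data introduced).
demand_2008_twh = 69                               # data center demand, billion kWh (TWh)
us_sales_twh = demand_2008_twh / 0.018             # implied total U.S. sales, ~3,833 TWh
after_savings_twh = demand_2008_twh * (1 - 0.80)   # ~13.8 TWh, reported as ~13
print(round(us_sales_twh), round(after_savings_twh, 1))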
Medina V and García JM (2011), "Live replication of virtual machines", In Proc. 10th WSEAS Intl. Conf. Software Engineering, Parallel and Distributed Systems, (Stevens Point, WI). , pp. 15-23.
BibTeX:
@inproceedings{medina-live:2011,
  author = {Medina, Violeta and García, J. M.},
  title = {Live replication of virtual machines},
  booktitle = {Proc. 10th WSEAS Intl. Conf. Software Engineering, Parallel and Distributed Systems,(Stevens Point, WI)},
  year = {2011},
  pages = {15--23},
  url = {http://www.wseas.us/e-library/conferences/2011/Cambridge/SEPADS/SEPADS-01.pdf}
}
Mell PM and Grance T (2011), "SP 800-145. The NIST Definition of Cloud Computing". Thesis at: National Institute of Standards & Technology. Gaithersburg, MD, United States
Abstract: Cloud computing is a model for enabling ubiquitous, convenient, on-demand network access to a shared pool of configurable computing resources (e.g., networks, servers, storage, applications, and services) that can be rapidly provisioned and released with minimal management effort or service provider interaction. This cloud model is composed of five essential characteristics, three service models, and four deployment models.
BibTeX:
@techreport{mell-nist:2011,
  author = {Mell, Peter M. and Grance, Timothy},
  title = {SP 800-145. The NIST Definition of Cloud Computing},
  school = {National Institute of Standards & Technology},
  year = {2011}
}
Montoya FG, Alcayde A, Sánchez P, Gómez J and Martín F (2011), "Zenergy: An Open Source Project for Power Quality Assessment and Monitoring", In 2011 International Conference on Power Engineering, Energy and Electrical Drives (POWERENG). , pp. 1-6.
Abstract: In this paper, a new open source project focused on power quality assessment and monitoring for low voltage power systems is presented. Power quality (PQ) is a crucial matter for proper and reliable operation of industrial or home electrical appliances. In order to improve PQ techniques, efforts are made to develop smart sensors that can report near real-time data. Proprietary software and hardware on dedicated computers or servers processes these data and shows relevant information through tables or graphics. In this situation, interoperability, compatibility and scalability are not possible because of the lack of open protocols. In our work, we introduce Zenergy, an open source platform for computing, storing and managing all of the information generated from smart sensors. We apply the most up-to-date algorithms developed for power quality, event detection, and harmonic analysis or power metering. Zenergy makes use of cutting-edge web technologies such as HTML5, CSS3 and JavaScript to provide user-friendly interaction and powerful capabilities for the analysis, measurement and monitoring of power systems. All software used in our work is open source, running on Linux.
BibTeX:
@inproceedings{montoya-zenergy:2011,
  author = {Montoya, F. G. and Alcayde, A. and Sánchez, P. and Gómez, J. and Martín, F.},
  title = {Zenergy: An Open Source Project for Power Quality Assessment and Monitoring},
  booktitle = {2011 International Conference on Power Engineering, Energy and Electrical Drives (POWERENG)},
  year = {2011},
  pages = {1--6},
  doi = {10.1109/PowerEng.2011.6036474}
}
Shaw M (2011), "Server Design On Open Compute Project".
BibTeX:
@misc{opencompute:2011,
  author = {Shaw, Mark},
  title = {Server Design On Open Compute Project},
  journal = {Open Compute},
  year = {2011}
}
Huang KC and Lai KP (2010), "Processor allocation policies for reducing resource fragmentation in multi-cluster grid and cloud environments", In 2010 International Computer Symposium (ICS2010)., December, 2010. , pp. 971-976.
Abstract: Multi-cluster is the common underlying architecture of most grid and cloud environments, which usually consist of multiple clusters located at different places. One important characteristic of such computing environments is the performance difference between intra-cluster and inter-cluster communications. Intra-cluster communication networks usually have shorter latency and larger bandwidth than inter-cluster networks. Therefore, in those systems parallel jobs are intended to be executed within a single one of the clusters to achieve better performance although co-allocation across multiple clusters is sometimes technically possible. Under such job execution policy resource fragmentation becomes a crucial issue that happens when there is no single cluster being able to accommodate a job while the total number of processors in the entire grid or cloud is enough for the job. This paper proposes a most-fit policy to reduce resource fragmentation occurrences and evaluates it with several existing processor allocation policies. The experimental results indicate that careful selection of processor allocation policies can improve overall system performance greatly and the proposed most-fit policy can outperform other policies in most conditions.
BibTeX:
@inproceedings{huang-processor:2010,
  author = {Huang, K. C. and Lai, K. P.},
  title = {Processor allocation policies for reducing resource fragmentation in multi-cluster grid and cloud environments},
  booktitle = {2010 International Computer Symposium (ICS2010)},
  year = {2010},
  pages = {971--976},
  doi = {10.1109/COMPSYM.2010.5685368}
}
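As an illustration of the most-fit idea summarized in the abstract above, here is a minimal Python sketch, assuming the policy simply picks the single cluster that leaves the fewest free processors after placing the whole job (the paper's exact rule and tie-breaking may differ):

def most_fit(clusters, job_size):
    """Pick the cluster that can host the whole job and leaves the fewest
    free processors afterwards, reducing fragmentation. Returns the cluster
    index, or None if no single cluster can accommodate the job."""
    best, best_leftover = None, None
    for i, free in enumerate(clusters):
        if free >= job_size:
            leftover = free - job_size
            if best is None or leftover < best_leftover:
                best, best_leftover = i, leftover
    return best

# Example: free processors per cluster; an 8-processor job goes to cluster 2.
print(most_fit([16, 32, 9], 8))  # -> 2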
Osier-Mixon J (2010), "Hardware Aberto: Como e Quando Funciona". December, 2010.
BibTeX:
@misc{osier-hardware:2010,
  author = {Osier-Mixon, J.},
  title = {Hardware Aberto: Como e Quando Funciona},
  howpublished = {IBM Corporation},
  year = {2010}
}
Symanski D and Watkins C (2010), "380Vdc Data Center at Duke Energy", Emerging Technology Summit at Electric Power Research Institute., November, 2010. Vol. 9
BibTeX:
@article{symanski-380vdc:2010,
  author = {Symanski, D. and Watkins, C.},
  title = {380Vdc Data Center at Duke Energy},
  journal = {Emerging Technology Summit at Electric Power Research Institute},
  year = {2010},
  volume = {9}
}
Speitkamp B and Bichler M (2010), "A Mathematical Programming Approach for Server Consolidation Problems in Virtualized Data Centers", IEEE Transactions on Services Computing., October, 2010. Vol. 3(4), pp. 266-278.
BibTeX:
@article{speitkamp-mathematical:2010,
  author = {Speitkamp, B and Bichler, M},
  title = {A Mathematical Programming Approach for Server Consolidation Problems in Virtualized Data Centers},
  journal = {IEEE Transactions on Services Computing},
  year = {2010},
  volume = {3},
  number = {4},
  pages = {266--278},
  url = {http://ieeexplore.ieee.org/document/5467027/},
  doi = {10.1109/TSC.2010.25}
}
Cioara T, Pop C, Anghel I, Salomie I, Dinsoreanu M, Condor I and Mihaly F (2010), "Immune-inspired Technique for Optimizing Server's Energy Consumption", In 2010 IEEE International Conference on Intelligent Computer Communication and Processing (ICCP)., August, 2010. , pp. 273-280.
Abstract: This paper presents an immune-inspired technique for optimizing a server's energy consumption. The proposed technique is similar to an artificial immune system associated with a server, aiming to detect non-optimal server energy consumption states and to take the appropriate actions that would bring the server into an optimal state. The optimization technique has two main stages: an initialization stage and a self-optimization stage. In the initialization stage the server is monitored for a specific period of time to collect energy consumption historical raw data for identifying associations between the server energy consumption states and the appropriate optimization actions. In the self-optimization stage, energy consumption server state snapshots are taken at regular time intervals and formally represented using a biologically-inspired antigen model. The obtained antigen is then classified as self (optimal energy consumption state) or non-self (non-optimal energy consumption state) using a set of detectors obtained in the initialization stage. For non-self antigens a biologically-inspired clonal selection approach is used to determine the actions that need to be taken to bring the server into an optimal energy consumption state.
BibTeX:
@inproceedings{cioara-immune-inspired:2010,
  author = {Cioara, T. and Pop, C. B. and Anghel, I. and Salomie, I. and Dinsoreanu, M. and Condor, I. and Mihaly, F.},
  title = {Immune-inspired Technique for Optimizing Server's Energy Consumption},
  booktitle = {2010 IEEE International Conference on Intelligent Computer Communication and Processing (ICCP)},
  year = {2010},
  pages = {273--280},
  doi = {10.1109/ICCP.2010.5606424}
}
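A toy sketch of the self/non-self classification step described in the abstract above, assuming server-state snapshots and detectors are plain numeric feature vectors matched within a fixed radius (the paper's antigen encoding and detector generation are more elaborate):

import math

def is_non_self(snapshot, detectors, radius=0.15):
    """Flag a normalized server-state snapshot as a non-optimal (non-self)
    state if it falls within the matching radius of any detector produced
    in the initialization stage."""
    def dist(a, b):
        return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))
    return any(dist(snapshot, d) <= radius for d in detectors)

# Hypothetical features: CPU utilization, fan power share, idle power share.
detectors = [(0.10, 0.90, 0.80), (0.95, 1.00, 0.10)]
print(is_non_self((0.12, 0.85, 0.75), detectors))  # -> True: trigger optimization actions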
Iyengar M, Schmidt R and Caricari J (2010), "Reducing Energy Usage in Data Centers Through Control of Room Air Conditioning Units", In 2010 12th IEEE ITHERM., June, 2010. , pp. 1-11.
Abstract: Information technology data centers consume a large amount of electricity in the US and world-wide. Cooling has been found to contribute about one third of this energy use. The two primary contributors to the data center cooling energy use are the refrigeration chiller and the computer room air conditioning units (CRACs). There have been recent changes in specifications for the data center environmental envelopes as mandated by ASHRAE (American Society for Heating Refrigeration and Air Conditioning Engineers), one of which specifically pertains to the upper and lower bound of air temperatures at the inlet to servers that are housed in data center rooms. These changes have been put in place in part to address the desire for greater cooling energy efficiency of these facilities. This paper primarily focuses on the methodologies to reduce the energy usage of room air conditioning devices by exploiting these recent changes in standards for the equipment environmental envelope. A 22000 square foot (6706 m2) data center with 739 kilowatt of IT load is used as a representative example for numerical CFD analyses using a commercial software package to demonstrate methodologies to reduce the cooling energy use of information technology data centers. Several test case simulations are used to enable the calculation of room level air temperature fields for varying design conditions such as different numbers of operational CRACs or the volumetric air flow rate setting of the CRACs. Computation of cooling energy is carried out using available vendor equipment information. The relationship between the reduction in energy usage in CRAC units and the server inlet air temperatures is quantified and a systematic methodology for CRAC shut off is proposed. The relative magnitude of reduction in data center cooling energy use from shutting off CRACs or reducing CRAC motor speeds is also compared with scenarios involving increases in refrigeration chiller plant water temperature set point.
BibTeX:
@inproceedings{iyengar-reducing:2010,
  author = {Iyengar, M. and Schmidt, R. and Caricari, J.},
  title = {Reducing Energy Usage in Data Centers Through Control of Room Air Conditioning Units},
  booktitle = {2010 12th IEEE ITHERM},
  year = {2010},
  pages = {1--11},
  doi = {10.1109/ITHERM.2010.5501418}
}
Arno R, Friedl A, Gross P and Schuerger R (2010), "Reliability of Example Data Center Designs Selected by Tier Classification", In 2010 IEEE Industrial and Commercial Power Systems Technical Conference (I&CPS)., May, 2010. , pp. 1-8.
Abstract: When the concept of reliability began to formally become an integrated engineering approach in the 1950s, reliability was associated with failure rate. Today the term “reliability” is used as an umbrella definition covering a variety of subjects including availability, durability, quality and sometimes the function of the product. Reliability engineering was developed to quantify “how reliable” a component, product or system was when used in a specific application for a specific period of time. The data center industry has come to rely on “tier classifications” as presented in a number of papers by the Uptime Institute [1] as a gradient scale of data center configurations and requirements from least (Tier 1) to most reliable (Tier 4). This paper will apply the principles and modeling techniques of reliability engineering to specific examples that were selected based on the gradient scale provided by the tier classifications and discuss the results. A review of the metrics of reliability engineering being used will also be included.
BibTeX:
@inproceedings{arno-reliability:2010,
  author = {Arno, R. and Friedl, A. and Gross, P. and Schuerger, R.},
  title = {Reliability of Example Data Center Designs Selected by Tier Classification},
  booktitle = {2010 IEEE Industrial and Commercial Power Systems Technical Conference (I&CPS)},
  year = {2010},
  pages = {1--8},
  doi = {10.1109/ICPS.2010.5489890}
}
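For context, the steady-state availability arithmetic that such reliability assessments build on can be sketched as follows; the MTBF/MTTR figures are made-up placeholders, and the paper itself applies full reliability models to the tier examples rather than only these closed forms:

def availability(mtbf_h, mttr_h):
    """Steady-state availability of a repairable component."""
    return mtbf_h / (mtbf_h + mttr_h)

def series(*avail):
    """All components must work (e.g., utility feed -> UPS -> PDU)."""
    p = 1.0
    for a in avail:
        p *= a
    return p

def parallel(*avail):
    """At least one redundant path must work (e.g., 2N power paths)."""
    p = 1.0
    for a in avail:
        p *= (1.0 - a)
    return 1.0 - p

ups = availability(250_000, 8)     # hypothetical UPS MTBF/MTTR in hours
print(series(ups, ups))            # two modules in a single path
print(parallel(ups, ups))          # the same modules as redundant paths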
Machida F, Kawato M and Maeno Y (2010), "Redundant virtual machine placement for fault-tolerant consolidated server clusters", In 2010 IEEE Network Operations and Management Symposium - NOMS 2010., April, 2010. , pp. 32-39.
Abstract: Consolidated server systems using server virtualization involve serious risks of host server failures that induce unexpected downs of all hosted virtual machines and applications. To protect applications requiring high availability from unpredictable host server failures, a redundant configuration using virtual machines can be an effective countermeasure. This paper presents a virtual machine placement method for establishing a redundant configuration against host server failures with fewer host servers. The proposed method estimates the requisite minimum number of virtual machines according to the performance requirements of application services and decides an optimum virtual machine placement so that minimum configurations survive any k host server failures. The evaluation results clarify that the proposed method achieves the requested fault-tolerance level with a smaller number of hosting servers compared to the conventional N+M redundant configuration approach.
BibTeX:
@inproceedings{machida-redundant:2010,
  author = {Machida, F. and Kawato, Masahiro and Maeno, Y.},
  title = {Redundant virtual machine placement for fault-tolerant consolidated server clusters},
  booktitle = {2010 IEEE Network Operations and Management Symposium - NOMS 2010},
  year = {2010},
  pages = {32--39},
  doi = {10.1109/NOMS.2010.5488431}
}
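A simplified check of the survivability property targeted by the method above: a placement is k-fault-tolerant for an application if, after removing any k hosts, at least the requisite minimum number of its VMs remains. This sketch only verifies a given placement; the paper's contribution, computing the placement itself, is not shown:

from itertools import combinations

def survives_k_failures(placement, min_vms, k):
    """placement: dict host -> number of the application's VMs on that host.
    Returns True if every combination of k failed hosts still leaves at
    least min_vms virtual machines running."""
    hosts = list(placement)
    for failed in combinations(hosts, k):
        alive = sum(v for h, v in placement.items() if h not in failed)
        if alive < min_vms:
            return False
    return True

# 6 VMs spread over 3 hosts, minimum configuration of 3 VMs, k = 1 failure.
print(survives_k_failures({"h1": 2, "h2": 2, "h3": 2}, 3, 1))  # -> True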
ASHRAE (2010), "Real-time Energy Consumption Measurements in Data Centers: ASHRAE Datacom" Atlanta, GA, January, 2010. (9) American Society of Heating, Refrigerating and Air-Conditioning Engineers.
BibTeX:
@book{ashrae-real-time:2010,
  author = {ASHRAE},
  title = {Real-time Energy Consumption Measurements in Data Centers: ASHRAE Datacom},
  publisher = {American Society of Heating, Refrigerating and Air-Conditioning Engineers},
  year = {2010},
  number = {9}
}
Antonopoulos N and Gillam L (2010), "Cloud Computing: Principles, Systems and Applications" Springer Science & Business Media.
Abstract: Cloud computing has recently emerged as a subject of substantial industrial and academic interest, though its meaning and scope is hotly debated. For some researchers, clouds are a natural evolution towards the full commercialisation of grid systems, while others dismiss the term as a mere re-branding of existing pay-per-use technologies. From either perspective, ''cloud'' is now the label of choice for accountable pay-per-use access to third party applications and computational resources on a massive scale. Clouds support patterns of less predictable resource use for applications and services across the IT spectrum, from online office applications to high-throughput transactional services and high-performance computations involving substantial quantities of processing cycles and storage. The concept of clouds seems to blur the distinctions between a variety of technologies that encompass grid services, web services and data centres, and leads to considerations of lowered-cost provisioning for bursty applications. This book provides comprehensive coverage of the state of the art in cloud computing, highlighting and clarifying the conceptual and systemic links with other distributed computing approaches.
BibTeX:
@book{antonopoulos-cloud:2010,
  author = {Antonopoulos, Nikos and Gillam, Lee},
  title = {Cloud Computing: Principles, Systems and Applications},
  publisher = {Springer Science & Business Media},
  year = {2010}
}
Beloglazov A, Buyya R, Lee YC and Zomaya A (2010), "A taxonomy and survey of energy-efficient data centers and cloud computing systems", arXiv:1007.0066 [cs].
Abstract: Traditionally, the development of computing systems has been focused on performance improvements driven by the demand of applications from consumer, scientific and business domains. However, the ever increasing energy consumption of computing systems has started to limit further performance growth due to overwhelming electricity bills and carbon dioxide footprints. Therefore, the goal of the computer system design has been shifted to power and energy efficiency. To identify open challenges in the area and facilitate future advancements it is essential to synthesize and classify the research on power and energy-efficient design conducted to date. In this work we discuss causes and problems of high power / energy consumption, and present a taxonomy of energy-efficient design of computing systems covering the hardware, operating system, virtualization and data center levels. We survey various key works in the area and map them to our taxonomy to guide future design and development efforts. This chapter is concluded with a discussion of advancements identified in energy-efficient computing and our vision on future research directions.
BibTeX:
@article{beloglazov-taxonomy:2010,
  author = {Beloglazov, Anton and Buyya, Rajkumar and Lee, Young Choon and Zomaya, Albert},
  title = {A taxonomy and survey of energy-efficient data centers and cloud computing systems},
  journal = {arXiv:1007.0066 [cs]},
  year = {2010},
  url = {http://arxiv.org/abs/1007.0066}
}
Chen Y, Ganapathi AS, Griffith R and Katz RH (2010), "Analysis and lessons from a publicly available google cluster trace". Thesis at: EECS Department, University of California, Berkeley. EECS Department, University of California, Berkeley (UCB/EECS-2010-95 94)
BibTeX:
@techreport{chen-analysis:2010,
  author = {Chen, Yanpei and Ganapathi, Archana Sulochana and Griffith, Rean and Katz, Randy H},
  title = {Analysis and lessons from a publicly available google cluster trace},
  school = {EECS Department, University of California, Berkeley},
  year = {2010},
  number = {UCB/EECS-2010-95 94},
  note = {UCB/EECS-2010-95 94}
}
Duy TVT, Sato Y and Inoguchi Y (2010), "Performance Evaluation of a Green Scheduling Algorithm for Energy Savings in Cloud Computing", In Parallel & Distributed Processing, Workshops and PhD Forum (IPDPSW), 2010 IEEE International Symposium on. , pp. 1-8. IEEE.
BibTeX:
@inproceedings{duy-performance:2010,
  author = {Duy, Truong Vinh Truong and Sato, Yukinori and Inoguchi, Yasushi},
  title = {Performance Evaluation of a Green Scheduling Algorithm for Energy Savings in Cloud Computing},
  booktitle = {Parallel & Distributed Processing, Workshops and PhD Forum (IPDPSW), 2010 IEEE International Symposium on},
  publisher = {IEEE},
  year = {2010},
  pages = {1--8},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5470908}
}
Ford D, Labelle F, Popovici F, Stokely M, Truong V-A, Barroso L, Grimes C and Quinlan S (2010), "Availability in Globally Distributed Storage Systems", In Proceedings of the 9th USENIX Symposium on Operating Systems Design and Implementation.
BibTeX:
@inproceedings{ford-availability:2010,
  author = {Ford, Daniel and Labelle, Francois and Popovici, Florentina and Stokely, Murray and Truong, Van-Anh and Barroso, Luiz and Grimes, Carrie and Quinlan, Sean},
  title = {Availability in Globally Distributed Storage Systems},
  booktitle = {Proceedings of the 9th USENIX Symposium on Operating Systems Design and Implementation},
  year = {2010},
  url = {https://research.google.com/pubs/pub36737.html}
}
Heller B, Seetharaman S, Mahadevan P, Yiakoumis Y, Sharma P, Banerjee S and McKeown N (2010), "ElasticTree: Saving Energy in Data Center Networks", NSDI. Vol. 10, pp. 249-264.
BibTeX:
@article{heller-elastictree:2010,
  author = {Heller, Brandon and Seetharaman, Srinivasan and Mahadevan, Priya and Yiakoumis, Yiannis and Sharma, Puneet and Banerjee, Sujata and McKeown, Nick},
  title = {ElasticTree: Saving Energy in Data Center Networks},
  journal = {NSDI},
  year = {2010},
  volume = {10},
  pages = {249--264},
  url = {http://www.usenix.org/event/nsdi10/tech/full_papers/heller.pdf}
}
(2010), "2010 IEEE Network Operations and Management Symposium: NOMS 2010 ; Osaka, Japan, 19 - 23 April 2010" Piscataway, NJ IEEE.
BibTeX:
@collection{kiriha-2010:2010,
  editor = {Kiriha, Yoshiaki and Institute of Electrical and Electronics Engineers and Communications Society and International Federation for Information Processing},
  title = {2010 IEEE Network Operations and Management Symposium: NOMS 2010 ; Osaka, Japan, 19 - 23 April 2010},
  publisher = {IEEE},
  year = {2010},
  note = {OCLC: 838470083}
}
Koslovski G, Yeow WL, Westphal C, Huu TT, Montagnat J and Vicat-Blanc P (2010), "Reliability Support in Virtual Infrastructures", In 2010 IEEE Second International Conference on Cloud Computing Technology and Science. , pp. 49-58.
Abstract: Through the recent emergence of joint resource and network virtualization, dynamic composition and provisioning of time-limited and isolated virtual infrastructures is now possible. One other benefit of infrastructure virtualization is the capability of transparent reliability provisioning (reliability becomes a service provided by the infrastructure). In this context, we discuss the motivations and gains of introducing customizable reliability of virtual infrastructures when executing large-scale distributed applications, and present a framework to specify, allocate and deploy virtualized infrastructure with reliability capabilities. An approach to efficiently specify and control the reliability at runtime is proposed. We illustrate these ideas by analyzing the introduction of reliability at the virtual-infrastructure level on a real application. Experimental results, obtained with an actual medical-imaging application running in virtual infrastructures provisioned in the experimental large-scale Grid'5000 platform, show the benefits of the virtualization of reliability.
BibTeX:
@inproceedings{koslovski-reliability:2010,
  author = {Koslovski, G. and Yeow, W. L. and Westphal, C. and Huu, T. T. and Montagnat, J. and Vicat-Blanc, P.},
  title = {Reliability Support in Virtual Infrastructures},
  booktitle = {2010 IEEE Second International Conference on Cloud Computing Technology and Science},
  year = {2010},
  pages = {49--58},
  doi = {10.1109/CloudCom.2010.23}
}
Lee YC and Zomaya AY (2010), "Energy Efficient Utilization of Resources in Cloud Computing Systems", J Supercomput. Vol. 60(2), pp. 268-280.
Abstract: The energy consumption of under-utilized resources, particularly in a cloud environment, accounts for a substantial amount of the actual energy use. Inherently, a resource allocation strategy that takes into account resource utilization would lead to a better energy efficiency; this, in clouds, extends further with virtualization technologies in that tasks can be easily consolidated. Task consolidation is an effective method to increase resource utilization and in turn reduces energy consumption. Recent studies identified that server energy consumption scales linearly with (processor) resource utilization. This encouraging fact further highlights the significant contribution of task consolidation to the reduction in energy consumption. However, task consolidation can also lead to the freeing up of resources that can sit idling yet still drawing power. There have been some notable efforts to reduce idle power draw, typically by putting computer resources into some form of sleep/power-saving mode. In this paper, we present two energy-conscious task consolidation heuristics, which aim to maximize resource utilization and explicitly take into account both active and idle energy consumption. Our heuristics assign each task to the resource on which the energy consumption for executing the task is explicitly or implicitly minimized without the performance degradation of that task. Based on our experimental results, our heuristics demonstrate their promising energy-saving capability.
BibTeX:
@article{lee-energy:2010,
  author = {Lee, Young Choon and Zomaya, Albert Y.},
  title = {Energy Efficient Utilization of Resources in Cloud Computing Systems},
  journal = {J Supercomput},
  year = {2010},
  volume = {60},
  number = {2},
  pages = {268--280},
  doi = {10.1007/s11227-010-0421-3}
}
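A small sketch of the consolidation idea summarized above, assuming server power scales linearly with utilization between an idle and a peak value; the cost function and constraints here are illustrative, not the paper's ECTC/MaxUtil heuristics:

def power(util, p_idle=150.0, p_peak=300.0):
    """Linear power model: idle draw plus a utilization-proportional part."""
    return p_idle + (p_peak - p_idle) * util

def assign(task_util, servers):
    """Place the task on the server with the smallest marginal energy increase,
    provided the server does not exceed full utilization."""
    best, best_delta = None, None
    for i, u in enumerate(servers):
        if u + task_util <= 1.0:
            delta = power(u + task_util) - power(u) if u > 0 else power(task_util)
            if best is None or delta < best_delta:
                best, best_delta = i, delta
    if best is not None:
        servers[best] += task_util
    return best

servers = [0.6, 0.0, 0.3]
print(assign(0.2, servers), servers)  # favours an already-active server over waking an idle one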
Liaperdos I, Paraskevas I, Potirakis S and Rangoussi M (2010), "Building a Low-cost Network for Power-quality Monitoring with Open-source-hardware Nodes", In 7th MedPower 2010. , pp. 1-5.
Abstract: This paper presents the guidelines for building a low-cost network suitable for power-quality monitoring. The implementation of a standalone basic node capable of extracting various power-quality metrics (total harmonic distortion (THD), crest factor, etc.) in an open-source hardware embedded system board is described. By utilizing simple signal processing techniques directly in hardware, this node maintains high portability due to its small dimensions. Furthermore, exploitation of its built-in TCP/IP capabilities allows the deployment of a wide area network by the interconnection of basic nodes, in order to provide power-quality data collection for further processing and evaluation. Due to the distributed processing load, the proposed network can achieve high efficiency and reliability.
BibTeX:
@inproceedings{liaperdos-building:2010,
  author = {Liaperdos, I. and Paraskevas, I. and Potirakis, S. M. and Rangoussi, M.},
  title = {Building a Low-cost Network for Power-quality Monitoring with Open-source-hardware Nodes},
  booktitle = {7th MedPower 2010},
  year = {2010},
  pages = {1--5},
  doi = {10.1049/cp.2010.0895}
}
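The power-quality metrics named in the abstract (THD, crest factor) are standard quantities; a minimal sketch of how a monitoring node could compute them from a sampled voltage window, assuming the harmonic RMS magnitudes come from an FFT over one fundamental period:

import math
import numpy as np

def thd(harmonic_rms):
    """Total harmonic distortion: RMS of harmonics 2..N over the fundamental."""
    fundamental, *harmonics = harmonic_rms
    return math.sqrt(sum(h * h for h in harmonics)) / fundamental

def crest_factor(samples):
    """Peak value over RMS value of the sampled waveform."""
    samples = np.asarray(samples, dtype=float)
    return np.max(np.abs(samples)) / np.sqrt(np.mean(samples ** 2))

t = np.linspace(0, 0.02, 512, endpoint=False)          # one 50 Hz period
v = 230 * math.sqrt(2) * np.sin(2 * math.pi * 50 * t)  # clean sine wave
print(crest_factor(v))          # ~1.414 for a pure sinusoid
print(thd([230.0, 4.6, 2.3]))   # ~0.022 with small 3rd and 5th harmonics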
Liu S, Quan G and Ren S (2010), "On-line Scheduling of Real-time Services for Cloud Computing", In 2010 6th World Congress on Services. , pp. 459-464.
Abstract: In this paper, we introduce a novel utility accrual scheduling algorithm for real-time cloud computing services. The real-time tasks are scheduled non-preemptively with the objective to maximize the total utility. The most unique characteristic of our approach is that, different from the traditional utility accrual approach that works under one single time utility function (TUF), we have two different TUFs, a profit TUF and a penalty TUF, associated with each task at the same time, to model the real-time applications for cloud computing that need not only to reward the early completions but also to penalize the abortions or deadline misses of real-time tasks. Our experimental results show that our proposed algorithm can significantly outperform the traditional scheduling algorithms such as the Earliest Deadline First (EDF), the traditional utility accrual scheduling algorithm and an early scheduling approach based on the similar model.
BibTeX:
@inproceedings{liu-line:2010,
  author = {Liu, S. and Quan, G. and Ren, S.},
  title = {On-line Scheduling of Real-time Services for Cloud Computing},
  booktitle = {2010 6th World Congress on Services},
  year = {2010},
  pages = {459--464},
  doi = {10.1109/SERVICES.2010.109}
}
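A toy illustration of the two-TUF accrual model described above, assuming a linearly decaying profit TUF before the deadline and a constant penalty charged on an abort or deadline miss; the paper's TUF shapes and scheduling policy are more general:

def accrued_utility(finish, release, deadline, max_profit, penalty, aborted=False):
    """Profit TUF: decays linearly from max_profit at release time to 0 at the
    deadline. Penalty TUF: a fixed negative utility on abort or deadline miss."""
    if aborted or finish > deadline:
        return -penalty
    remaining = (deadline - finish) / (deadline - release)
    return max_profit * remaining

# Early completion earns most of the profit; a miss is charged the penalty.
print(accrued_utility(finish=3, release=0, deadline=10, max_profit=100, penalty=40))   # 70.0
print(accrued_utility(finish=12, release=0, deadline=10, max_profit=100, penalty=40))  # -40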
Rieck B (2010), "Basic Analysis of Bin-Packing Heuristics".
Abstract: The benchmarks have been performed using an Intel Celeron M 1.5 GHz. The results are not too surprising: Obviously, the Next-Fit heuristic is fastest because only 1 bin has to be managed. However, due to the efficient data structure (a priority queue) that has been used for the Max-Rest heuristic, this heuristic will generally be almost as fast as Next-Fit. Furthermore, the implementation of the Best-Fit heuristic has a worst-case running time of O(Kn), where K is the maximum weight. Thus, the slowest algorithms are First-Fit and First
BibTeX:
@misc{rieck-basic:2010,
  author = {Rieck, Bastian},
  title = {Basic Analysis of Bin-Packing Heuristics},
  publisher = {Interdisciplinary Center for Scientific Computing, Heidelberg University},
  year = {2010},
  url = {http://bastian.rieck.ru/uni/bin_packing/bin_packing.pdf}
}
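For reference, minimal versions of the Next-Fit and First-Fit heuristics compared in this note (capacity-normalized weights; Best-Fit and Max-Rest follow the same pattern with a different bin-selection rule):

def next_fit(weights, capacity=1.0):
    """Keep a single open bin; open a new one when the item does not fit."""
    bins, current = [], 0.0
    for w in weights:
        if bins and current + w <= capacity:
            current += w
        else:
            bins.append([])
            current = w
        bins[-1].append(w)
    return bins

def first_fit(weights, capacity=1.0):
    """Scan all open bins and place the item in the first one it fits into."""
    bins, loads = [], []
    for w in weights:
        for i, load in enumerate(loads):
            if load + w <= capacity:
                bins[i].append(w)
                loads[i] += w
                break
        else:
            bins.append([w])
            loads.append(w)
    return bins

items = [0.5, 0.7, 0.5, 0.2, 0.4, 0.2, 0.5]
print(len(next_fit(items)), len(first_fit(items)))  # 5 vs 4 bins on this input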
Sosinsky B (2010), "Defining Cloud Computing", In Cloud Computing Bible. , pp. 1-22. Wiley Publishing, Inc..
BibTeX:
@incollection{sosinsky-defining:2010,
  author = {Sosinsky, Barrie},
  title = {Defining Cloud Computing},
  booktitle = {Cloud Computing Bible},
  publisher = {Wiley Publishing, Inc.},
  year = {2010},
  pages = {1--22},
  note = {DOI: 10.1002/9781118255674.ch1},
  url = {http://onlinelibrary.wiley.com/doi/10.1002/9781118255674.ch1/summary}
}
Yeow W-L, Westphal C and Kozat U (2010), "Designing and embedding reliable virtual infrastructures", In VISA '10 - ACM SIGCOMM workshop on Virtualized infrastructure systems and architectures. , pp. 33. ACM Press.
BibTeX:
@inproceedings{yeow-designing:2010,
  author = {Yeow, Wai-Leong and Westphal, Cédric and Kozat, Ulaş},
  title = {Designing and embedding reliable virtual infrastructures},
  booktitle = {VISA '10 - ACM SIGCOMM workshop on Virtualized infrastructure systems and architectures},
  publisher = {ACM Press},
  year = {2010},
  pages = {33},
  url = {http://portal.acm.org/citation.cfm?doid=1851399.1851406},
  doi = {10.1145/1851399.1851406}
}
Chen W, Nguyen ST, Coops R, Oetomo S and Feijs L (2009), "Wireless Transmission Design for Health Monitoring at Neonatal Intensive Care Units", In 2nd ISABEL 2009., November, 2009. , pp. 1-6.
Abstract: Health monitoring is crucial for the survival of the ill and fragile infants admitted at the neonatal intensive care unit (NICU) in a hospital. However, the adhesive electrodes and wires cause discomfort to the patients and hamper the parent-child interaction. In this paper, we propose the application of wireless transmission technology for neonatal monitoring at the NICU. To demonstrate the design concept, a prototype wireless transmission system is built using BlueSMiRF and Arduino Pro Mini. Software is developed to ensure correct data transmission, detection and display. The system is designed to be suitable for integration into a non-invasive monitoring platform such as a smart neonatal jacket. Experimental results show that the prototype system successfully transmits and receives data from multiple sensors within the range of 20 m.
BibTeX:
@inproceedings{chen-wireless:2009,
  author = {Chen, Wei and Nguyen, Son Tung and Coops, R. and Oetomo, S. B. and Feijs, L.},
  title = {Wireless Transmission Design for Health Monitoring at Neonatal Intensive Care Units},
  booktitle = {2nd ISABEL 2009},
  year = {2009},
  pages = {1--6},
  doi = {10.1109/ISABEL.2009.5373671}
}
Batagelj B, Marovt J, Troha M and Mahnic D (2009), "Digital Airbrush", In ELMAR, 2009. ELMAR '09. International Symposium., September, 2009. , pp. 305-308.
Abstract: The basic idea is simple: the graffiter paints with a specially modified can, which when pressed on the cap does not leave color traces, but emits infrared light. The computer application draws the appropriate graffiti on the canvas, which is positioned in front of the graffiter. An infrared camera provides detection of the light source for the application, which then correctly determines the color, size and density (the graffiter regulates these parameters through the speed buttons, which are located on the can) and at the end of this process, the current drawing track is projected on the canvas. The most important algorithms used in the implementation of the software solution are: an algorithm for locating the brightest area, the algorithm for the implementation of the drops and the algorithm for interpolation. An important component of the simulation is also a Bluetooth connection to the Arduino BT platform, which is connected to three potentiometers. These are used by the graffiter to manipulate painting parameters. The results of extensive tests have shown that the prototype has potential and that the final simulation perfectly follows the basic idea. The problem occurs only in the lack of quality of the equipment (infrared camera, computer power) for full implementation.
BibTeX:
@inproceedings{batagelj-digital:2009,
  author = {Batagelj, B. and Marovt, J. and Troha, M. and Mahnic, D.},
  title = {Digital Airbrush},
  booktitle = {ELMAR, 2009. ELMAR '09. International Symposium},
  year = {2009},
  pages = {305--308}
}
Buratti C, Conti A, Dardari D and Verdone R (2009), "An Overview on Wireless Sensor Networks Technology and Evolution", Sensors., August, 2009. Vol. 9(9), pp. 6869-6896.
BibTeX:
@article{buratti-overview:2009,
  author = {Buratti, Chiara and Conti, Andrea and Dardari, Davide and Verdone, Roberto},
  title = {An Overview on Wireless Sensor Networks Technology and Evolution},
  journal = {Sensors},
  year = {2009},
  volume = {9},
  number = {9},
  pages = {6869--6896},
  url = {http://www.mdpi.com/1424-8220/9/9/6869/},
  doi = {10.3390/s90906869}
}
Silva MADLE (2009), "Uma infraestrutura de comando e controle de data center para um conjunto de recursos computacionais". Thesis at: USP., June, 2009.
Abstract: The growth in the need for computational resources generated by new classes of commercial and scientific applications presents a new type of challenge for computing infrastructures. The accelerated growth of resource demands drives an accelerated growth in the absolute number of computational elements in these infrastructures. In this scenario, provisioning and operating systems become progressively more complex tasks, primarily due to the increase in scale. This work proposes a model for a computing infrastructure that operates as an abstract repository of run-time computational resources with variable levels of consumption. Designed to operate as an ensemble (i.e., a coordinated set) of computational resources, large numbers of elements are aggregated into sets of servers providing processing, storage and communication resources. The ensemble is conceived and implemented with extensive use of virtualization technologies and has a provisioning and operation mechanism organized as a distributed command and control (C2) structure. A proof-of-concept implementation of such a computing infrastructure is presented, and the proposal is validated through a combination of experimental results and emulation.
BibTeX:
@phdthesis{silva-c2:2009,
  author = {Silva, Marcio Augusto de Lima e},
  title = {Uma infraestrutura de comando e controle de data center para um conjunto de recursos computacionais},
  school = {USP},
  year = {2009},
  url = {http://www.teses.usp.br/teses/disponiveis/3/3141/tde-01092009-151011/}
}
Genova F, Bellifemine F, Gaspardone M, Beoni M, Cuda A and Fici GP (2009), "Management System Based on Low Cost Wireless Sensor Network Technology, to Monitor, Control and Optimize Energy Consumption in Telecom Switch Plants and Data Centres", In 2009 4th Escon., May, 2009. , pp. 1-8.
Abstract: Telecom Italia (TI), being one of the top Italian electricity consumers, realized and started to deploy a real-time energy management system (Kaleidos) based on wireless sensor network technology (WSN), to monitor all relevant energy parameters of its switching plants (e.g. per-line energy consumption, room temperatures, humidity and lighting) and to remotely control room temperatures and air conditioning. Among the features of this system we outline: (-) average cost of each measurement point lower than 100 euro (excluding the service platform); (-) negligible installation and maintenance costs; (-) long life (>2 years, better 3) sensor node battery; (-) reliability, scalability and security; (-) self-configuring WSN. The architecture of the system consists of 4 layers: (-) a low cost set of sensor nodes deployed in each switching plant, where each node includes one or more sensors interfaced with a radio communication chip compliant to the ZigBee standard; (-) a gateway for each WSN, which acts as a data sink for the sensor nodes and which connects the WSN to the server platform through an Internet connection (LAN or GSM-GPRS); (-) a service platform (WSN-C), able to manage and control different kinds of sensor networks (energy management, health care, automotive, ...), to collect and store data and to make them available to applications via a web service; (-) a web application which provides a graphical user interface (GUI) and data analysis facilities. The first trial started in April 2007 and demonstrated that an energy saving in the order of 10-15% was possible by simply optimizing the air conditioning system working parameters. In 2008 TI decided to deploy Kaleidos into its 300 larger plants. A typical site consists of 10-15 rooms, 2000-6000 m2, 1-5 GWh/year energy consumption and is equipped with a sensor network of 20-30 current nodes and 20-25 environmental nodes. Kaleidos is used not only to monitor and control the energy and thermal behaviour of sites, but it has been shown to be effective in supporting the design of fine-grained energy saving actions and in real-time evaluation of those actions. Kaleidos is easily extendable to other industrial buildings or offices and can be interfaced or overlapped to a BMS.
BibTeX:
@inproceedings{genova-thermal:2009,
  author = {Genova, Fernando and Bellifemine, Fabio and Gaspardone, Marco and Beoni, Maurizio and Cuda, Alberto and Fici, Gian Piero},
  title = {Management System Based on Low Cost Wireless Sensor Network Technology, to Monitor, Control and Optimize Energy Consumption in Telecom Switch Plants and Data Centres},
  booktitle = {2009 4th Escon},
  year = {2009},
  pages = {1--8}
}
Stapelberg RF (2009), "Handbook of Reliability, Availability, Maintainability and Safety in Engineering Design", February, 2009. Springer Science & Business Media.
Abstract: Handbook of Reliability, Availability, Maintainability and Safety in Engineering Design studies the combination of various methods of designing for reliability, availability, maintainability and safety, as well as the latest techniques in probability and possibility modelling, mathematical algorithmic modelling, evolutionary algorithmic modelling, symbolic logic modelling, artificial intelligence modelling, and object-oriented computer modelling, in a logically structured approach to determining the integrity of engineering design. Handbook of Reliability, Availability, Maintainability and Safety in Engineering Design not only encompasses a depth of research into engineering design methods and techniques ranging from quantitative probability theory and expert judgement in Bayesian analysis to qualitative possibility theory, fuzzy logic and uncertainty in Markov analysis; from reliability block diagrams, fault trees, event trees and cause-consequence diagrams to Petri nets, genetic algorithms and artificial neural networks; but it also covers a breadth of research into the concept of integrity in engineering design. Such breadth of research is represented by the inclusion of the topics of reliability and performance, availability and maintainability, and safety and risk, in an overall concept of designing for integrity during the different phases of the engineering design process. These topics add significant value to the theoretical expertise and practical experience of process, chemical, civil, mechanical, electrical, and electronic engineers, by considering process engineering design from the point of view of ''what should be achieved'' to meet criteria for designing for reliability, availability, maintainability and safety.
BibTeX:
@book{stapelberg-handbook:2009,
  author = {Stapelberg, Rudolph Frederick},
  title = {Handbook of Reliability, Availability, Maintainability and Safety in Engineering Design},
  publisher = {Springer Science & Business Media},
  year = {2009}
}
Goldhar MP (2009), "Um framework de métricas de produtividade e eficiência energética em data centers". Thesis at: UFPE. Recife
BibTeX:
@phdthesis{framework:2009,
  author = {Goldhar, Marcos Porto},
  title = {Um framework de métricas de produtividade e eficiência energética em data centers},
  school = {UFPE},
  year = {2009},
  url = {http://repositorio.ufpe.br:8080/xmlui/handle/123456789/2306}
}
Norma ITU-T Y.1563 (2009), "Y.1563: Ethernet frame transfer and availability performance". Thesis at: International Telecommunication Union. (Y.1563), pp. 48.
BibTeX:
@techreport{itu-1563:2009,
  author = {Norma ITU-T Y.1563},
  title = {Y.1563: Ethernet frame transfer and availability performance},
  school = {International Telecommunication Union},
  year = {2009},
  number = {Y.1563},
  pages = {48},
  url = {http://www.itu.int/rec/T-REC-Y.1563-200901-I/en}
}
Paim R, Cardoso V, Caulliraux H and Clemente R (2009), "Gestão de Processos: Pensar, Agir e Aprender" Porto Alegre Bookman.
Abstract: Based on more than 20 years of research, application projects and testing, 'Gestão de Processos' is aimed at academics and practitioners.
BibTeX:
@book{paim-gestao:2009,
  author = {Paim, R. and Cardoso, V. and Caulliraux, H. and Clemente, R.},
  title = {Gestão de Processos: Pensar, Agir e Aprender},
  publisher = {Bookman},
  year = {2009}
}
Sharma B (2009), "Applications of Data Mining in the Management of Performance and Power in Data Centers". Thesis at: DCSE, PSU.
Abstract: Performance and power issues are becoming increasingly important in the design of large data centers for supporting a multitude of services. There are many perspectives of addressing these issues using various computer science principles. In this survey report, I will discuss the applications of data mining techniques to manage power and performance in data centers. Although the use of data mining in designing performance centric and energy efficient computer systems is still in its infancy, I will elaborate on how researchers are exploiting data mining and machine learning approaches to save energy and improve the performance of computer systems ranging from laptops to large data centers. Specifically, I will summarize the motivations, current state of the art and future directions of the research in this topic. I will brief some of the recent and notable research works in this discipline. Towards the end, I will brief my vision to leverage the recent advances in the applications of data mining techniques to data centers in my PhD research work.
BibTeX:
@techreport{sharma-applications:2009,
  author = {Sharma, Bikash},
  title = {Applications of Data Mining in the Management of Performance and Power in Data Centers},
  school = {DCSE, PSU},
  year = {2009}
}
Veras M (2009), "Datacenter - Componente Central de Infraestrutura de TI" Rio de Janeiro Brasport.
Abstract: This book presents an overview of the datacenter, a critical component of the IT infrastructure. The book is modular and details the aspects involved in the construction and design of a datacenter from both the logical and the physical point of view. Topics such as virtualization, processing, storage and networking are treated extensively from the datacenter perspective. Aspects of the design of the power and cooling structure and of the facilities are also part of the work.
BibTeX:
@book{veras-datacenter:2009,
  author = {Veras, Manoel},
  title = {Datacenter - Componente Central de Infraestrutura de TI},
  publisher = {Brasport},
  year = {2009}
}
Squicciarini A, Lee W, Bertino E and Song C (2008), "A Policy-based Accountability Tool for Grid Computing Systems", In IEEE Asia-Pacific Services Computing Conference, 2008. APSCC '08., December, 2008. , pp. 95-100.
Abstract: The dynamic and multi-organizational nature of grid systems requires effective and efficient accountability systems to scale for accommodating a large number of users and resources. The availability of detailed and complete accountability data is crucial for both the grid administrators and the overall grid community. In this paper we present a layered architecture for addressing the end-to-end accountability problem. We introduce the concept of accountability agents, entities in charge of collecting accountability data, keeping track of submitted jobs and their users. We present a simple yet effective language to specify the relevant accountability data to be collected and selectively distributed by the accountability agents. Additionally, we design a decentralized and scalable approach to accountability, so as to be able to monitor job workflows with relatively little intrusion.
BibTeX:
@inproceedings{squicciarini-policy-based:2008,
  author = {Squicciarini, A. C. and Lee, Wonjun and Bertino, E. and Song, C. X.},
  title = {A Policy-based Accountability Tool for Grid Computing Systems},
  booktitle = {IEEE Asia-Pacific Services Computing Conference, 2008. APSCC '08},
  year = {2008},
  pages = {95--100},
  doi = {10.1109/APSCC.2008.257}
}
Herring H and Sorrell S (2008), "Energy Efficiency and Sustainable Consumption: The Rebound Effect", November, 2008. Springer.
Abstract: This book challenges conventional wisdom by showing how, in some circumstances, improved energy efficiency may increase energy consumption. Relying upon energy efficiency to reduce carbon emissions could therefore be misguided. This book explores the broader implications for climate change and sustainable consumption.
BibTeX:
@book{herring-energy:2008,
  author = {Herring, H. and Sorrell, S.},
  title = {Energy Efficiency and Sustainable Consumption: The Rebound Effect},
  publisher = {Springer},
  year = {2008}
}
Gislason D (2008), "Zigbee Wireless Networking", October, 2008. Newnes.
Abstract: ZigBee is a standard based on the IEEE 802.15.4 standard for wireless personal networks. This standard allows for the creation of very low cost and low power networks - these applications run for years rather than months. These networks are created from sensors and actuators and can wirelessly control many electrical products such as remote controls, medical, industrial, and security sensors. Hundreds of companies are creating applications including Mitsubishi, Motorola, Freescale, and Siemens. This book is written for engineers who plan to develop ZigBee applications and networks, to understand how they work, and to evaluate this technology to see if it is appropriate to a particular project. This book does not simply state facts but explains what ZigBee can do through detailed code examples. It details how to plan and develop applications and networks; ZigBee sensors have many applications including industrial automation, medical sensing, remote controls, and security; it is a hot topic for today's electrical engineer because it is low cost and low power.
BibTeX:
@book{gislason-zigbee:2008,
  author = {Gislason, Drew},
  title = {Zigbee Wireless Networking},
  publisher = {Newnes},
  year = {2008}
}
Yick J, Mukherjee B and Ghosal D (2008), "Wireless Sensor Network Survey", Comput. Netw.., August, 2008. Vol. 52(12), pp. 2292-2330.
Abstract: A wireless sensor network (WSN) has important applications such as remote environmental monitoring and target tracking. This has been enabled by the availability, particularly in recent years, of sensors that are smaller, cheaper, and intelligent. These sensors are equipped with wireless interfaces with which they can communicate with one another to form a network. The design of a WSN depends significantly on the application, and it must consider factors such as the environment, the application's design objectives, cost, hardware, and system constraints. The goal of our survey is to present a comprehensive review of the recent literature since the publication of [I.F. Akyildiz, W. Su, Y. Sankarasubramaniam, E. Cayirci, A survey on sensor networks, IEEE Communications Magazine, 2002]. Following a top-down approach, we give an overview of several new applications and then review the literature on various aspects of WSNs. We classify the problems into three different categories: (1) internal platform and underlying operating system, (2) communication protocol stack, and (3) network services, provisioning, and deployment. We review the major development in these three categories and outline new challenges.
BibTeX:
@article{yick-wireless:2008,
  author = {Yick, Jennifer and Mukherjee, Biswanath and Ghosal, Dipak},
  title = {Wireless Sensor Network Survey},
  journal = {Comput. Netw.},
  year = {2008},
  volume = {52},
  number = {12},
  pages = {2292--2330},
  doi = {10.1016/j.comnet.2008.04.002}
}
Brown RE, Masanet ER, Nordman B, Tschudi WF, Shehabi A, Stanley J, Koomey JG, Sartor DA and Chan PT (2008), "Report to Congress on Server and Data Center Energy Efficiency: Public Law 109-431", Lawrence Berkeley National Laboratory., June, 2008. (LBNL-363E), pp. 137.
Abstract: This report was prepared in response to the request from Congress stated in Public Law 109-431 (H.R. 5646), ''An Act to Study and Promote the Use of Energy Efficient Computer Servers in the United States.'' This report assesses current trends in energy use and energy costs of data centers and servers in the U.S. (especially federal government facilities) and outlines existing and emerging opportunities for improved energy efficiency. It also makes recommendations for pursuing these energy-efficiency opportunities broadly across the country through the use of information and incentive-based programs. Findings from this report include: an estimate that data centers consumed about 61 billion kilowatt-hours (kWh) in 2006, roughly 1.5% of total U.S. electricity consumption, or about $4.5 billion in electricity costs; federal servers and data centers alone account for approximately 6 billion kWh (10%) of this electricity consumption; assuming current trends continue, in 5 years the national energy consumption by servers and data centers is expected to nearly double, to nearly 100 billion kWh; existing technologies and strategies could reduce typical server energy use by an estimated 25%, and even greater energy savings are possible with advanced technologies; assuming state-of-the-art energy efficiency practices are implemented throughout U.S. data centers, this projected energy use can be reduced by up to 55% compared to current efficiency trends. This report makes several recommendations for policies to achieve this savings potential. Among these recommendations are standardized performance measurement for data centers and their equipment, leadership on energy efficiency in federal data centers, a private sector energy challenge, information on best practices, and further research and development on energy efficiency technologies and practices.
BibTeX:
@article{brown-report:2008,
  author = {Brown, Richard E. and Masanet, Eric R. and Nordman, Bruce and Tschudi, William F. and Shehabi, Arman and Stanley, John and Koomey, Jonathan G. and Sartor, Dale A. and Chan, Peter T.},
  title = {Report to Congress on Server and Data Center Energy Efficiency: Public Law 109-431},
  journal = {Lawrence Berkeley National Laboratory},
  year = {2008},
  number = {LBNL-363E},
  pages = {137},
  url = {https://ses.lbl.gov/sites/all/files/pdf_1.pdf}
}
Tamura Y, Sato K, Kihara S and Moriai S (2008), "Kemari: Virtual Machine Synchronization for Fault Tolerance using DomT", Xen Summit 2008, USENIX ATC '08. Boston-USA, June, 2008. , pp. 2.
Abstract: In recent years, Internet services have been growing in number and functionality. They are typically hosted on a number of commodity servers, and reducing the running cost of these servers is a crucial problem for service
BibTeX:
@article{tamura-kemari::2008,
  author = {Tamura, Yoshi and Sato, Koji and Kihara, Seiji and Moriai, Satoshi},
  title = {Kemari: Virtual Machine Synchronization for Fault Tolerance using DomT},
  journal = {Xen Summit 2008, USENIX ATC '08},
  year = {2008},
  pages = {2},
}
Al-Fares M, Loukissas A and Vahdat A (2008), "A Scalable, Commodity Data Center Network Architecture", In Proceedings of the ACM SIGCOMM 2008 Conference on Data Communication. New York, NY, USA , pp. 63-74. ACM.
Abstract: Today's data centers may contain tens of thousands of computers with significant aggregate bandwidth requirements. The network architecture typically consists of a tree of routing and switching elements with progressively more specialized and expensive equipment moving up the network hierarchy. Unfortunately, even when deploying the highest-end IP switches/routers, resulting topologies may only support 50% of the aggregate bandwidth available at the edge of the network, while still incurring tremendous cost. Non-uniform bandwidth among data center nodes complicates application design and limits overall system performance. In this paper, we show how to leverage largely commodity Ethernet switches to support the full aggregate bandwidth of clusters consisting of tens of thousands of elements. Similar to how clusters of commodity computers have largely replaced more specialized SMPs and MPPs, we argue that appropriately architected and interconnected commodity switches may deliver more performance at less cost than available from today's higher-end solutions. Our approach requires no modifications to the end host network interface, operating system, or applications; critically, it is fully backward compatible with Ethernet, IP, and TCP.
BibTeX:
@inproceedings{al-fares-scalable:2008,
  author = {Al-Fares, Mohammad and Loukissas, Alexander and Vahdat, Amin},
  title = {A Scalable, Commodity Data Center Network Architecture},
  booktitle = {Proceedings of the ACM SIGCOMM 2008 Conference on Data Communication},
  publisher = {ACM},
  year = {2008},
  pages = {63--74},
  url = {http://doi.acm.org/10.1145/1402958.1402967},
  doi = {10.1145/1402958.1402967}
}
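The architecture evaluated in this paper is the well-known k-ary fat-tree; as a quick reference, a sketch of its sizing arithmetic, showing how identical k-port commodity switches scale to full-bisection-bandwidth clusters (these counts are standard properties of the fat-tree topology rather than figures quoted in the abstract above):

def fat_tree_sizes(k):
    """Element counts for a k-ary fat-tree built from identical k-port switches."""
    assert k % 2 == 0
    return {
        "pods": k,
        "core_switches": (k // 2) ** 2,
        "agg_plus_edge_switches": k * k,   # k pods x (k/2 aggregation + k/2 edge)
        "hosts": k ** 3 // 4,              # each edge switch serves k/2 hosts
    }

print(fat_tree_sizes(48))  # 48-port switches -> 27648 hosts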
Banzi M (2008), "Getting Started with Arduino" Sebastopol, CA Make Books - Imprint of: O'Reilly Media.
Abstract: This valuable little book offers a thorough introduction to the open-source electronics prototyping platform that's taking the design and hobbyist world by storm. Getting Started with Arduino gives you lots of ideas for Arduino projects and helps you get going on them right away. From getting organized to putting the final touches on your prototype, all the information you need is right in the book. Inside, you'll learn about: interaction design and physical computing; the Arduino hardware and software development environment; basics of electricity and electronics; prototyping on a solderless breadboard; drawing a schematic diagram; and more. With inexpensive hardware and open-source software components that you can download free, getting started with Arduino is a snap. To use the introductory examples in this book, all you need is a USB Arduino, a USB A-B cable, and an LED. Join the tens of thousands of hobbyists who have discovered this incredible (and educational) platform. Written by the co-founder of the Arduino project, with illustrations by Elisa Canducci, Getting Started with Arduino gets you in on the fun! This 128-page book is a greatly expanded follow-up to the author's original short PDF that's available on the Arduino website.
BibTeX:
@book{banzi-getting:2008,
  author = {Banzi, Massimo},
  title = {Getting Started with Arduino},
  publisher = {Make Books - Imprint of: O'Reilly Media},
  year = {2008},
  edition = {Ill}
}
Cully B, Lefebvre G, Meyer D, Feeley M, Hutchinson N and Warfield A (2008), "Remus: High availability via asynchronous virtual machine replication", In Proceedings of the 5th USENIX Symposium on Networked Systems Design and Implementation. , pp. 161-174. San Francisco.
BibTeX:
@inproceedings{cully-remus:2008,
  author = {Cully, Brendan and Lefebvre, Geoffrey and Meyer, Dutch and Feeley, Mike and Hutchinson, Norm and Warfield, Andrew},
  title = {Remus: High availability via asynchronous virtual machine replication},
  booktitle = {Proceedings of the 5th USENIX Symposium on Networked Systems Design and Implementation},
  address = {San Francisco},
  year = {2008},
  pages = {161--174},
  url = {https://www.usenix.org/legacy/event/nsdi08/tech/full_papers/cully/cully_html/}
}
Li Y, Thai MT and Wu W (2008), "Wireless Sensor Networks and Applications" Boston, MA Springer US.
BibTeX:
@book{li-wireless:2008,
  author = {Li, Yingshu and Thai, My T. and Wu, Weili},
  title = {Wireless Sensor Networks and Applications},
  publisher = {Springer US},
  year = {2008},
  doi = {10.1007/978-0-387-49592-7}
}
Patrick DR and Fardo SW (2008), "Electrical Distribution Systems" The Fairmont Press, Inc..
BibTeX:
@book{patrick-electrical:2008,
  author = {Patrick, Dale R. and Fardo, Stephen W.},
  title = {Electrical Distribution Systems},
  publisher = {The Fairmont Press, Inc.},
  year = {2008}
}
Petersen K, Feldt R, Mujtaba S and Mattsson M (2008), "Systematic Mapping Studies in Software Engineering.", In EASE. Vol. 8, pp. 68-77.
BibTeX:
@inproceedings{petersen-systematic:2008,
  author = {Petersen, Kai and Feldt, Robert and Mujtaba, Shahid and Mattsson, Michael},
  title = {Systematic Mapping Studies in Software Engineering.},
  booktitle = {EASE},
  year = {2008},
  volume = {8},
  pages = {68--77},
  url = {http://robertfeldt.net/publications/petersen-ease08-sysmap-studies-in-se.pdf}
}
Tang Q, Gupta SKS and Varsamopoulos G (2008), "Energy-efficient Thermal-aware Task Scheduling for Homogeneous High-performance Computing Data Centers: A Cyber-physical Approach", IEEE Transactions on Parallel and Distributed Systems. Vol. 19(11), pp. 1458-1472.
Abstract: High-performance computing data centers have been rapidly growing, both in number and size. Thermal management of data centers can address dominant problems associated with cooling such as the recirculation of hot air from the equipment outlets to their inlets and the appearance of hot spots. In this paper, we show through formalization that minimizing the peak inlet temperature allows for the lowest cooling power needs. Using a low-complexity linear heat recirculation model, we define the problem of minimizing the peak inlet temperature within a data center through task assignment (MPIT-TA), consequently leading to minimal cooling requirements. We also provide two methods to solve the formulation: XInt-GA, which uses a genetic algorithm, and XInt-SQP, which uses sequential quadratic programming. Results from small-scale data center simulations show that solving the formulation leads to an inlet temperature distribution that, compared to other approaches, is 2 degC to 5 degC lower and achieves about 20 to 30 percent cooling energy savings at common data center utilization rates. Moreover, our algorithms consistently outperform the minimize heat recirculation algorithm, a recirculation-reducing task placement algorithm in the literature.
BibTeX:
@article{tang-energy-efficient:2008,
  author = {Tang, Q. and Gupta, S. K. S. and Varsamopoulos, G.},
  title = {Energy-Efficient Thermal-Aware Task Scheduling for Homogeneous High-Performance Computing Data Centers: A Cyber-Physical Approach},
  journal = {IEEE Transactions on Parallel and Distributed Systems},
  year = {2008},
  volume = {19},
  number = {11},
  pages = {1458--1472},
  doi = {10.1109/TPDS.2008.111}
}
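The MPIT-TA formulation summarized in the abstract above can be made concrete with a tiny numeric sketch. The following Python snippet is only an illustration under assumed numbers (the 3x3 heat-recirculation matrix D, the supply temperature and the candidate power assignments are all invented, and it does not implement the paper's XInt-GA or XInt-SQP solvers): it evaluates inlet temperatures under a linear recirculation model and reports the peak value that the task-assignment problem seeks to minimize.

# Minimal sketch: evaluate the peak inlet temperature of a candidate
# power/task assignment under an assumed linear heat-recirculation model.
import numpy as np

def peak_inlet_temperature(D, supply_temp, power):
    """Inlet temperatures T_in = T_sup + D @ P; return the peak value."""
    inlet = supply_temp + D @ power
    return inlet.max()

# Hypothetical 3-server room: D[i, j] is the inlet temperature rise at
# server i per watt dissipated at server j (heat recirculation).
D = np.array([[0.02, 0.01, 0.00],
              [0.01, 0.02, 0.01],
              [0.00, 0.01, 0.02]])
supply_temp = 20.0  # degC supplied by the CRAC units

# Two candidate assignments of the same total load (600 W).
balanced = np.array([200.0, 200.0, 200.0])
skewed = np.array([400.0, 100.0, 100.0])

print(peak_inlet_temperature(D, supply_temp, balanced))  # 28.0 degC
print(peak_inlet_temperature(D, supply_temp, skewed))    # 29.0 degC, higher peak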
Lin S, Liu J and Fang Y (2007), "ZigBee Based Wireless Sensor Networks and Its Applications in Industrial", In 2007 IEEE International Conference on Automation and Logistics., August, 2007. , pp. 1979-1983.
Abstract: Based on a study of the characteristics of ZigBee technology and an analysis of the structure of wireless sensor networks, a new reliable, flexible and inexpensive WSN system based on ZigBee technology is proposed. Its structure, in which the MAC layer and the network layer of ZigBee are taken over wholly, is given, and its node, which integrates the WSN node and the ZigBee module together, including both the hardware implementation and the software implementation based on TinyOS, is designed. Finally, the desired characteristics of the WSN system applied in industry are analyzed, and it is shown that the new WSN system has a broad range of prospects for industrial application.
BibTeX:
@inproceedings{lin-zigbee:2007,
  author = {Lin, Shizhuang and Liu, Jingyu and Fang, Yanjun},
  title = {ZigBee Based Wireless Sensor Networks and Its Applications in Industrial},
  booktitle = {2007 IEEE International Conference on Automation and Logistics},
  year = {2007},
  pages = {1979--1983},
  doi = {10.1109/ICAL.2007.4338898}
}
Bailey M, Eastwood M, Grieser T, Borovick L, Turner V and Gray R (2007), "International Data Corporation Special Study: Data Center of the Future". April, 2007.
BibTeX:
@misc{bailey-idc:2007,
  author = {Bailey, M. and Eastwood, M. and Grieser, T. and Borovick, L. and Turner, V. and Gray, R. C.},
  title = {International Data Corporation Special Study: Data Center of the Future},
  journal = {International Data Corporation -- IDC},
  year = {2007},
  number = {06c4799}
}
Koomey JG and others (2007), "Estimating Total Power Consumption by Servers in the US and the World".
BibTeX:
@misc{koomey:2007,
  author = {Koomey, Jonathan G. and others},
  title = {Estimating Total Power Consumption by Servers in the US and the World},
  year = {2007}
}
Nathuji R and Schwan K (2007), "VirtualPower: Coordinated Power Management in Virtualized Enterprise Systems", In Proceedings of Twenty-first ACM SIGOPS Symposium on Operating Systems Principles. New York, NY, USA , pp. 265-278. ACM.
Abstract: Power management has become increasingly necessary in large-scale datacenters to address costs and limitations in cooling or power delivery. This paper explores how to integrate power management mechanisms and policies with the virtualization technologies being actively deployed in these environments. The goals of the proposed VirtualPower approach to online power management are (i) to support the isolated and independent operation assumed by guest virtual machines (VMs) running on virtualized platforms and (ii) to make it possible to control and globally coordinate the effects of the diverse power management policies applied by these VMs to virtualized resources. To attain these goals, VirtualPower extends to guest VMs `soft' versions of the hardware power states for which their policies are designed. The resulting technical challenge is to appropriately map VM-level updates made to soft power states to actual changes in the states or in the allocation of underlying virtualized hardware. An implementation of VirtualPower Management (VPM) for the Xen hypervisor addresses this challenge by provision of multiple system-level abstractions including VPM states, channels, mechanisms, and rules. Experimental evaluations on modern multicore platforms highlight resulting improvements in online power management capabilities, including minimization of power consumption with little or no performance penalties and the ability to throttle power consumption while still meeting application requirements. Finally, coordination of online methods for server consolidation with VPM management techniques in heterogeneous server systems is shown to provide up to 34% improvements in power consumption.
BibTeX:
@inproceedings{nathuji-virtualpower:2007,
  author = {Nathuji, Ripal and Schwan, Karsten},
  title = {VirtualPower: Coordinated Power Management in Virtualized Enterprise Systems},
  booktitle = {Proceedings of Twenty-first ACM SIGOPS Symposium on Operating Systems Principles},
  publisher = {ACM},
  year = {2007},
  pages = {265--278},
  url = {http://doi.acm.org/10.1145/1294261.1294287},
  doi = {10.1145/1294261.1294287}
}
Weibull (2007), "Reliability Basics: Availability and the Different Ways to Calculate It", Weibull: Reliability HotWire. Vol. 79
BibTeX:
@article{weibull-reliability:2007,
  author = {Weibull},
  title = {Reliability Basics: Availability and the Different Ways to Calculate It},
  journal = {Weibull: Reliability HotWire},
  year = {2007},
  volume = {79},
  url = {http://www.weibull.com/hotwire/issue79/relbasics79.htm}
}
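As a quick companion to this entry, the Python sketch below illustrates two of the standard point-estimate availability formulas of the kind the article surveys (the MTBF, MTTR, uptime and downtime figures are invented for illustration).

# Minimal sketch of common steady-state availability calculations
# (illustrative numbers only).

def inherent_availability(mtbf_hours, mttr_hours):
    """Inherent availability A_i = MTBF / (MTBF + MTTR)."""
    return mtbf_hours / (mtbf_hours + mttr_hours)

def operational_availability(uptime_hours, downtime_hours):
    """Operational availability A_o = uptime / (uptime + downtime)."""
    return uptime_hours / (uptime_hours + downtime_hours)

print(f"A_i = {inherent_availability(1000.0, 2.0):.5f}")     # 0.99800
print(f"A_o = {operational_availability(8700.0, 60.0):.5f}")  # 0.99315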
Schlossnagle T (2006), "Scalable Internet Architectures", July, 2006. Pearson Education.
Abstract: As a developer, you are aware of the increasing concern amongst developers and site architects that websites be able to handle the vast number of visitors that flood the Internet on a daily basis. Scalable Internet Architectures addresses these concerns by teaching you both good and bad design methodologies for building new sites and how to scale existing websites to robust, high-availability websites. Primarily example-based, the book discusses major topics in web architectural design, presenting existing solutions and how they work. Technology budget tight? This book will work for you, too, as it introduces new and innovative concepts to solving traditionally expensive problems without a large technology budget. Using open source and proprietary examples, you will be engaged in best practice design methodologies for building new sites, as well as appropriately scaling both growing and shrinking sites. Website development help has arrived in the form of Scalable Internet Architectures.
BibTeX:
@book{schlossnagle-scalable:2006,
  author = {Schlossnagle, Theo},
  title = {Scalable Internet Architectures},
  publisher = {Pearson Education},
  year = {2006}
}
Jia W and Zhou W (2006), "Distributed Network Systems: From Concepts to Implementations", June, 2006. Springer.
Abstract: This volume covers both theoretical and practical aspects of distributed computing. It describes the client-server model for developing distributed network systems, the communication paradigms used in a distributed network system, and the principles of reliability and security in the design of distributed network systems. Based on theoretical introductions, the book presents various implementation strategies and techniques for building distributed network systems, including examples in TCP/IP communications, the use of remote procedure call and remote method invocation techniques, and the development of web-based applications, distributed databases, and mobile computing systems.
BibTeX:
@book{jia-distributed:2006,
  author = {Jia, Weijia and Zhou, Wanlei},
  title = {Distributed Network Systems: From Concepts to Implementations},
  publisher = {Springer},
  year = {2006}
}
Helal AA, Heddaya AA and Bhargava BB (2006), "Replication Techniques in Distributed Systems", April, 2006. Springer Science & Business Media.
Abstract: Replication Techniques in Distributed Systems organizes and surveys the spectrum of replication protocols and systems that achieve high availability by replicating entities in failure-prone distributed computing environments. The entities discussed in this book vary from passive untyped data objects, to typed and complex objects, to processes and messages. Replication Techniques in Distributed Systems contains definitions and introductory material suitable for a beginner, theoretical foundations and algorithms, an annotated bibliography of commercial and experimental prototype systems, as well as short guides to recommended further readings in specialized subtopics. This book can be used as recommended or required reading in graduate courses in academia, as well as a handbook for designers and implementors of systems that must deal with replication issues in distributed systems.
BibTeX:
@book{helal-replication:2006,
  author = {Helal, Abdelsalam A. and Heddaya, Abdelsalam A. and Bhargava, Bharat B.},
  title = {Replication Techniques in Distributed Systems},
  publisher = {Springer Science & Business Media},
  year = {2006}
}
Balloni A (Org.) (2006), "Por que GESITI: Por que gestão em sistemas e tecnologias de informação?" Komedi.
BibTeX:
@book{balloni:2006,
  author = {Balloni, A. (Org.)},
  title = {Por Que Gesiti: Por Que Gestão Em Sistemas E Tecnologias De Informação?},
  publisher = {Komedi},
  year = {2006}
}
Fischer W and Mitasch C (2006), "High availability clustering of virtual machines–possibilities and pitfalls", Paper for the talk at the 12th Linuxtag, May 3rd-6th, Wiesbaden/Germany Version. Vol. 1
BibTeX:
@article{fischer-high:2006,
  author = {Fischer, Werner and Mitasch, Christoph},
  title = {High availability clustering of virtual machines–possibilities and pitfalls},
  journal = {Paper for the talk at the 12th Linuxtag, May 3rd-6th, Wiesbaden/Germany Version},
  year = {2006},
  volume = {1},
  url = {https://pdfs.semanticscholar.org/0072/1298260d3458b7d3555fa1e347e8ec11c84c.pdf}
}
Kohvakka M, Kuorilehto M, Hännikäinen M and Hämäläinen TD (2006), "Performance Analysis of IEEE 802.15.4 and ZigBee for Large-Scale Wireless Sensor Network Applications", In Proceedings of the 3rd ACM International Workshop on Performance Evaluation of Wireless Ad Hoc, Sensor and Ubiquitous Networks. New York, NY, USA , pp. 48-57. ACM.
BibTeX:
@inproceedings{kohvakka-performance:2006,
  author = {Kohvakka, Mikko and Kuorilehto, Mauri and Hännikäinen, Marko and Hämäläinen, Timo D.},
  title = {Performance Analysis of IEEE 802.15.4 and ZigBee for Large-Scale Wireless Sensor Network Applications},
  booktitle = {Proceedings of the 3rd ACM International Workshop on Performance Evaluation of Wireless Ad Hoc, Sensor and Ubiquitous Networks},
  publisher = {ACM},
  year = {2006},
  pages = {48--57},
  doi = {10.1145/1163610.1163619}
}
Sacco P (2006), "The Difference Between Data Centers and Computer Rooms". Thesis at: Global Knowledge. , pp. 09.
BibTeX:
@techreport{sacco:2006,
  author = {Sacco, Peter},
  title = {The Difference Between Data Centers and Computer Rooms},
  school = {Global Knowledge},
  year = {2006},
  pages = {09},
  url = {http://datacentersblog.com/wp-content/uploads/2010/09/The-Difference-between-a-Data-Center-and-a-Computer-Room.pdf}
}
Trappe W and Washington LC (2006), "Introduction to Cryptography: With Coding Theory" Upper Saddle River, NJ. Pearson Prentice Hall.
Abstract: This text is for a course in cryptography for advanced undergraduate and graduate students. Material is accessible to mathematically mature students having little background in number theory and computer programming. Core material is treated in the first eight chapters on areas such as classical cryptosystems, basic number theory, the RSA algorithm, and digital signatures. The remaining nine chapters cover optional topics including secret sharing schemes, games, and information theory. Appendices contain computer examples in Mathematica, Maple, and MATLAB. The text can be taught without computers.
BibTeX:
@book{trappe-introduction:2006,
  author = {Trappe, Wade and Washington, Lawrence C.},
  title = {Introduction to Cryptography: With Coding Theory},
  publisher = {Pearson Prentice Hall},
  year = {2006}
}
Bell MA (2005), "Use Best Practices to Design Data Center Facilities". Thesis at: Gartner Inc., April, 2005. (G00127434), pp. 26.
Abstract: Data centers seldom meet the operational and capacity requirements of their initial designs. The principal goals in data center design are flexibility and scalability, which involve site location, building selection, floor layout, electrical system design, mechanical design and modularity.
BibTeX:
@techreport{bell-use:2005,
  author = {Bell, Michael A.},
  title = {Use Best Practices to Design Data Center Facilities},
  school = {Gartner Inc},
  year = {2005},
  number = {G00127434},
  pages = {26},
  url = {https://www.gartner.com/doc/476880/use-best-practices-design-data}
}
TIA-942 (2005), "ANSI/TIA-942 - Telecommunications Infrastructure Standard for Data Centers". Thesis at: Telecommunications Industry Association (TIA). (942), pp. 148.
Abstract: This standard specifies the minimum requirements for telecommunications infrastructure of data centers and computer rooms, including single tenant enterprise data centers and multi-tenant internet hosting data centers. The topology specified in this document is intended to be applicable to any size data center.
BibTeX:
@techreport{tia-942:2005,
  author = {{TIA-942}},
  title = {ANSI/TIA-942 - Telecommunications Infrastructure Standard for Data Centers},
  school = {Telecommunications Industry Association (TIA)},
  year = {2005},
  number = {942},
  pages = {148}
}
Kitchenham B (2004), "Procedures for Performing Systematic Reviews" Keele University Technical Report TR/SE-0401.
BibTeX:
@book{kitchenham-procedures:2004,
  author = {Kitchenham, Barbara},
  title = {Procedures for Performing Systematic Reviews},
  publisher = {Keele University Technical Report TR/SE-0401},
  year = {2004}
}
Ganek AG and Corbi TA (2003), "The Dawning of the Autonomic Computing Era", IBM Systems Journal. Vol. 42(1), pp. 5-18.
Abstract: This issue of the IBM Systems Journal explores a broad set of ideas and approaches to autonomic computing--some first steps in what we see as a journey to create more self-managing computing systems. Autonomic computing represents a collection and integration of technologies that enable the creation of an information technology computing infrastructure for IBM's agenda for the next era of computing--e-business on demand. This paper presents an overview of IBM's autonomic computing initiative. It examines the genesis of autonomic computing, the industry and marketplace drivers, the fundamental characteristics of autonomic systems, a framework for how systems will evolve to become more self-managing, and the key role for open industry standards needed to support autonomic behavior in heterogeneous system environments. Technologies explored in each of the papers presented in this issue are introduced for the reader.
BibTeX:
@article{ganek-dawning:2003,
  author = {Ganek, A. G. and Corbi, T. A.},
  title = {The Dawning of the Autonomic Computing Era},
  journal = {IBM Systems Journal},
  year = {2003},
  volume = {42},
  number = {1},
  pages = {5--18},
  doi = {10.1147/sj.421.0005}
}
EVO-WORLD (Org.) (2002), "International Performance Measurement and Verification Protocol: Concepts and Options for Determining Energy and Water Savings". Thesis at: U.S. Department of Energy, EVO-World. (DOE/GO-102002-1554)
BibTeX:
@techreport{evo-world-international:2002,
  author = {{EVO-WORLD}},
  title = {International Performance Measurement and Verification Protocol: Concepts and Options for Determining Energy and Water Savings},
  school = {U.S. Department of Energy, EVO-World},
  year = {2002},
  number = {DOE/GO-102002-1554},
  url = {https://eric.ed.gov/?id=ED144007}
}
Oppenheim C, Morris A, McKnight C and Lowley S (2000), "The evaluation of WWW search engines", Journal of Documentation., April, 2000. Vol. 56(2), pp. 190-211.
BibTeX:
@article{oppenheim-evaluation:2000,
  author = {Oppenheim, C. and Morris, A. and McKnight, C. and Lowley, S.},
  title = {The evaluation of WWW search engines},
  journal = {Journal of Documentation},
  year = {2000},
  volume = {56},
  number = {2},
  pages = {190--211},
  url = {http://www.emeraldinsight.com/doi/abs/10.1108/00220410010803810},
  doi = {10.1108/00220410010803810}
}
Maier U, Stellner G and Zoraja I (1998), "Resource Allocation, Scheduling and Load Balancing Based on the PVM Resource Manager", Advances in Parallel Computing. Vol. 12, pp. 711-718.
Abstract: This work describes some concepts for using the PVM resource manager interface to implement a resource management system for PVM applications in a network of workstations (NOW). The basic implementation of a PVM resource manager has been extended by a variety of functions to define strategies for resource allocation and scheduling of PVM applications. The users of PVM applications benefit from the improved user-friendliness and efficiency in resource utilization. This work has been funded by the German Federal Department of Education, Science, Research and Technology, BMBF (Bundesministerium für Bildung, Wissenschaft, Forschung und Technologie) within the research project SEMPA (Software Engineering Methods for Parallel Applications in Scientific Computing).
BibTeX:
@article{maier-resource:1998,
  author = {Maier, Ursula and Stellner, Georg and Zoraja, Ivan},
  title = {Resource Allocation, Scheduling and Load Balancing Based on the PVM Resource Manager},
  journal = {Advances in Parallel Computing},
  year = {1998},
  volume = {12},
  pages = {711--718},
  url = {http://www.sciencedirect.com/science/article/pii/S0927545298800932}
}
Barringer HP (1997), "Availability, Reliability, Maintainability, and Capability" , pp. 11.
Abstract: Availability, reliability, maintainability, and capability are components of the effectiveness equation. The effectiveness equation is a figure of merit which is helpful for deciding which component(s) detract from performance measures. In many continuous process plants the reliability component is the largest detractor from better performance. Calculation of the components is illustrated by use of a small data set.
BibTeX:
@article{barringer-availability:1997,
  author = {Barringer, H. P.},
  title = {Availability, Reliability, Maintainability, and Capability},
  year = {1997},
  pages = {11},
  url = {https://pdfs.semanticscholar.org/973c/a4acc67b5d0b13c79c98653e23d8b0a2289e.pdf}
}
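Barringer's effectiveness equation combines the four components named in the title as a simple product. The Python sketch below is only an illustrative calculation with invented values, assuming the commonly cited multiplicative form effectiveness = availability x reliability x maintainability x capability.

# Minimal sketch of the effectiveness figure of merit as a product of its
# four components (illustrative values only, each expressed as a fraction).

def effectiveness(availability, reliability, maintainability, capability):
    return availability * reliability * maintainability * capability

print(round(effectiveness(0.98, 0.90, 0.95, 0.92), 3))  # 0.771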
Shirazi BA, Hurson AR and Kavi KM (1995), "Scheduling and Load Balancing in Parallel and Distributed Systems" Wiley.
Abstract: Advances in hardware and software technologies have led to an increased interest in the use of large-scale parallel and distributed systems for database, real-time, defense, and large-scale commercial applications. One of the biggest system issues is developing effective techniques for the distribution of multiple program processes on multiple processors. This book discusses how to schedule the processes among processing elements to achieve the expected performance goals, such as minimizing execution time, minimizing communication delays, or maximizing resource utilization. This book focuses on the future directions of the static scheduling and dynamic load balancing methods in parallel and distributed systems. It provides an overview and a detailed discussion on a wide range of topics from theoretical background to practical, state-of-the-art scheduling and load balancing techniques. The book will be a useful guide to industry professionals, academic professors, and students who are interested in these important aspects of parallel and distributed systems. Also, it will be helpful to those working on research and development in parallel processing applications, compilers and operating systems, system design, and software tools for parallel program development.
BibTeX:
@book{shirazi-scheduling:1995,
  author = {Shirazi, Behrooz A. and Hurson, Ali R. and Kavi, Krishna M.},
  title = {Scheduling and Load Balancing in Parallel and Distributed Systems},
  publisher = {Wiley},
  year = {1995}
}
Doyle J and Green R (1994), "Strategic Choice and Data Envelopment Analysis: Comparing Computers Across Many Attributes", J Inf Technol. Vol. 9(1), pp. 61-69.
Abstract: A linear programming approach (data envelopment analysis) is described to determine the relative merits of a set of multi-input, multi-output systems, in which more output for less input is considered good. The method is applied to benchmarks of microcomputers, and is contrasted with a multiple regression analysis of the same data. It is also argued that the essence of two opposing strategic outlooks can be captured within the method.
BibTeX:
@article{doyle-strategic:1994,
  author = {Doyle, John and Green, Rodney},
  title = {Strategic Choice and Data Envelopment Analysis: Comparing Computers Across Many Attributes},
  journal = {J Inf Technol},
  year = {1994},
  volume = {9},
  number = {1},
  pages = {61--69},
  doi = {10.1057/jit.1994.7}
}
Aggarwal KK (1993), "Reliability Engineering", October, 1993. Springer Science & Business Media.
Abstract: Modern society depends heavily upon a host of systems of varying complexity to perform the services required. The importance of reliability assumes new dimensions, primarily because of the higher cost of these highly complex machines required by mankind and the implication of their failure. This is why all industrial organizations wish to equip their scientists, engineers, managers and administrators with a knowledge of reliability concepts and applications. Based on the author's 20 years experience as reliability educator, researcher and consultant, Reliability Engineering introduces the reader systematically to reliability evaluation, prediction, allocation and optimization. It also covers further topics, such as maintainability and availability, software reliability, economics of reliability, reliability management, reliability testing, etc. A reliability study of some typical systems has been included to introduce the reader to the practical aspects. The book is intended for graduate students of engineering schools and also professional engineers, managers and reliability administrators as it has a wide coverage of reliability concepts.
BibTeX:
@book{aggarwal-reliability:1993,
  author = {Aggarwal, K. K.},
  title = {Reliability Engineering},
  publisher = {Springer Science & Business Media},
  year = {1993}
}
Martello S and Toth P (1990), "Lower bounds and reduction procedures for the bin packing problem", Discrete Applied Mathematics., July, 1990. Vol. 28(1), pp. 59-70.
Abstract: The bin packing problem, in which a set of items of various sizes has to be packed into a minimum number of identical bins, has been extensively studied during the past fifteen years, mainly with the aim of finding fast heuristic algorithms to provide good approximate solutions. We present lower bounds and a dominance criterion and derive a reduction algorithm. Lower bounds are evaluated through an extension of the concept of worst-case performance. For both lower bounds and reduction algorithm an experimental analysis is provided.
BibTeX:
@article{martello-lower:1990,
  author = {Martello, Silvano and Toth, Paolo},
  title = {Lower bounds and reduction procedures for the bin packing problem},
  journal = {Discrete Applied Mathematics},
  year = {1990},
  volume = {28},
  number = {1},
  pages = {59--70},
  url = {http://www.sciencedirect.com/science/article/pii/0166218X9090094S},
  doi = {10.1016/0166-218X(90)90094-S}
}
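To make the idea of a bin packing lower bound concrete, the Python sketch below computes the simplest continuous bound, the total item size divided by the bin capacity rounded up, alongside a first-fit-decreasing heuristic that gives an upper bound; Martello and Toth's stronger bounds and reduction procedure refine the former, and the instance shown here is invented for illustration.

# Minimal sketch: continuous lower bound vs. first-fit-decreasing upper
# bound for bin packing (invented instance; not the paper's stronger bounds).
import math

def continuous_lower_bound(sizes, capacity):
    """ceil(sum of item sizes / bin capacity)."""
    return math.ceil(sum(sizes) / capacity)

def first_fit_decreasing(sizes, capacity):
    """Number of bins used by the FFD heuristic (an upper bound)."""
    bins = []  # remaining free space per open bin
    for size in sorted(sizes, reverse=True):
        for i, free in enumerate(bins):
            if size <= free:
                bins[i] -= size
                break
        else:
            bins.append(capacity - size)
    return len(bins)

sizes = [7, 6, 5, 5, 4, 3, 2, 2]
capacity = 10
print(continuous_lower_bound(sizes, capacity))  # 4
print(first_fit_decreasing(sizes, capacity))    # 4, so 4 bins is optimal here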
Vesely W, Goldberg F, Roberts N and Haasl D (1980), "NRC: Fault Tree Handbook (NUREG-0492)". Thesis at: U.S. Nuclear Regulatory Commission. Washington, DC, USA (NUREG-0492)
Abstract: Since 1975, a short course entitled "System Safety and Reliability Analysis" has been presented to over 200 NRC personnel and contractors. The course has been taught jointly by David F. Haasl, Institute of System Sciences, Professor Norman H. Roberts, University of Washington, and members of the Probabilistic Analysis Staff, NRC, as part of a risk assessment training program sponsored by the Probabilistic Analysis Staff. This handbook has been developed not only to serve as text for the System Safety and Reliability Course, but also to make available to others a set of otherwise undocumented material on fault tree construction and evaluation. The publication of this handbook is in accordance with the recommendations of the Risk Assessment Review Group Report (NUREG/CR-0400) in which it was stated that the fault/event tree methodology both can and should be used more widely by the NRC. It is hoped that this document will help to codify and systematize the fault tree approach to systems analysis.
BibTeX:
@techreport{vesely-nrc:1980,
  author = {Vesely, W.E. and Goldberg, F.F. and Roberts, N.H. and Haasl, D.F.},
  title = {NRC: Fault Tree Handbook (NUREG-0492)},
  school = {U.S. Nuclear Regulatory Commission},
  year = {1980},
  number = {NUREG-0492},
  url = {https://www.nrc.gov/reading-rm/doc-collections/nuregs/staff/sr0492/}
}
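As a small companion to the fault tree methodology the handbook codifies, the Python sketch below evaluates the top-event probability of a toy fault tree with independent basic events, using the standard AND-gate (product) and OR-gate (one minus the product of complements) formulas; the event probabilities and the tree itself are invented.

# Minimal sketch of fault tree gate evaluation with independent basic events
# (illustrative probabilities; not taken from the handbook's examples).

def and_gate(probabilities):
    """P(all inputs fail) = product of the input probabilities."""
    result = 1.0
    for p in probabilities:
        result *= p
    return result

def or_gate(probabilities):
    """P(at least one input fails) = 1 - product of (1 - p)."""
    result = 1.0
    for p in probabilities:
        result *= (1.0 - p)
    return 1.0 - result

# Toy tree: TOP = OR(pump fails, AND(valve A fails, valve B fails)).
pump, valve_a, valve_b = 0.01, 0.05, 0.05
top = or_gate([pump, and_gate([valve_a, valve_b])])
print(f"P(top event) = {top:.6f}")  # 0.012475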
Ullman JD (1975), "NP-complete Scheduling Problems", J. Comput. Syst. Sci.. Vol. 10(3), pp. 384-393.
Abstract: We show that the problem of finding an optimal schedule for a set of jobs is NP-complete even in the following two restricted cases: (1) all jobs require one time unit; (2) all jobs require one or two time units, and there are only two processors (resolving in the negative a conjecture of R. L. Graham, Proc. SJCC, 1972, pp. 205-218). As a consequence, the general preemptive scheduling problem is also NP-complete. These results are tantamount to showing that the scheduling problems mentioned are intractable.
BibTeX:
@article{ullman-np-complete:1975,
  author = {Ullman, J. D.},
  title = {NP-complete Scheduling Problems},
  journal = {J. Comput. Syst. Sci.},
  year = {1975},
  volume = {10},
  number = {3},
  pages = {384--393},
  doi = {10.1016/S0022-0000(75)80008-0}
}