From 560585c125ad5791b70198832beda699347ad03d Mon Sep 17 00:00:00 2001 From: github-actions Date: Tue, 24 Sep 2024 00:50:08 +0000 Subject: [PATCH] Publications API was updated by GitHub Actions --- .../v1/10.1007/s10623-022-01028-0/index.json | 2 +- .../v1/10.1016/j.eswa.2021.116078/index.json | 6 +- .../v1/10.1038/s42003-020-01270-z/index.json | 2 +- .../v1/10.1093/comjnl/bxaa055/index.json | 2 +- .../v1/10.1109/ICIAFS.2014.7069624/index.json | 2 +- .../v1/10.1109/ICIAFS.2016.7946528/index.json | 2 +- .../ICIAfS52090.2021.9606093/index.json | 2 +- .../10.1109/ICIINFS.2011.6038120/index.json | 2 +- .../ICIIS53135.2021.9660702/index.json | 2 +- .../MERCon50084.2020.9185336/index.json | 2 +- .../v1/10.1109/MIES.2016.7780263/index.json | 2 +- .../v1/10.1109/TCAD.2015.2445736/index.json | 2 +- .../v1/10.1145/3412382.3458269/index.json | 2 +- .../v1/10.1155/2019/8162475/index.json | 2 +- .../v1/10.1186/s12859-020-03697-x/index.json | 2 +- .../v1/10.1186/s41182-021-00325-z/index.json | 4 +- .../10.1371/journal.pntd.0009756/index.json | 2 +- .../10.1371/journal.pone.0278440/index.json | 2 +- .../v1/10.1515/jmc-2019-0014/index.json | 2 +- .../licej.2040.2589.2015.0268/index.json | 2 +- .../v1/10.3390/electronics9091525/index.json | 2 +- .../v1/10.3991/ijim.v10i2.4817/index.json | 2 +- .../v1/10.7873/DATE.2013.152/index.json | 4 +- publications/v1/all/index.json | 56 ++--- .../v1/filter/research-groups/index.json | 28 +-- publications/v1/filter/staff/index.json | 34 +-- publications/v1/filter/students/index.json | 8 +- publications/v1/filter/tags/index.json | 228 +++++++++--------- 28 files changed, 204 insertions(+), 204 deletions(-) diff --git a/publications/v1/10.1007/s10623-022-01028-0/index.json b/publications/v1/10.1007/s10623-022-01028-0/index.json index 87633bb0..0f2cfe97 100644 --- a/publications/v1/10.1007/s10623-022-01028-0/index.json +++ b/publications/v1/10.1007/s10623-022-01028-0/index.json @@ -2,7 +2,7 @@ "title": "Standard model leakage-resilient authenticated key exchange using inner-product extractors", "venue": "Designs, Codes and Cryptography", "year": "2022", - "abstract": "With the development of side-channel attacks, a necessity arises to invent authenticated key exchange protocols in a leakage-resilient manner. Constructing authenticated key exchange protocols using existing cryptographic schemes is an effective method, as such construction can be instantiated with any appropriate scheme in a way that the formal security argument remains valid. In parallel, constructing authenticated key exchange protocols that are proven to be secure in the standard model is more preferred as they rely on real-world assumptions. In this paper, we present a Diffie\u00e2\u0080\u0093Hellman-style construction of a leakage-resilient authenticated key exchange protocol, that can be instantiated with any CCLA2-secure public-key encryption scheme and a function from the pseudo-random function family. Our protocol is proven to be secure in the standard model assuming the hardness of the decisional Diffie\u00e2\u0080\u0093Hellman problem. Furthermore, it is resilient to continuous partial leakage of long-term secret keys, that happens even after the session key is established, while satisfying the security features defined by the eCK security model.", + "abstract": "With the development of side-channel attacks, a necessity arises to invent authenticated key exchange protocols in a leakage-resilient manner. 
Constructing authenticated key exchange protocols using existing cryptographic schemes is an effective method, as such construction can be instantiated with any appropriate scheme in a way that the formal security argument remains valid. In parallel, constructing authenticated key exchange protocols that are proven to be secure in the standard model is more preferred as they rely on real-world assumptions. In this paper, we present a Diffie\u2013Hellman-style construction of a leakage-resilient authenticated key exchange protocol, that can be instantiated with any CCLA2-secure public-key encryption scheme and a function from the pseudo-random function family. Our protocol is proven to be secure in the standard model assuming the hardness of the decisional Diffie\u2013Hellman problem. Furthermore, it is resilient to continuous partial leakage of long-term secret keys, that happens even after the session key is established, while satisfying the security features defined by the eCK security model.", "authors": [ "Janaka Alawatugoda", "Tatsuaki Okamoto" diff --git a/publications/v1/10.1016/j.eswa.2021.116078/index.json b/publications/v1/10.1016/j.eswa.2021.116078/index.json index f7c62fe4..ec2fd4b4 100644 --- a/publications/v1/10.1016/j.eswa.2021.116078/index.json +++ b/publications/v1/10.1016/j.eswa.2021.116078/index.json @@ -2,12 +2,12 @@ "title": "Random subspace and random projection nearest neighbor ensembles for high dimensional data", "venue": "Elsevier Expert systems with applications", "year": "2022", - "abstract": "The random subspace and the random projection methods are investigated and compared as techniques for forming ensembles of nearest neighbor classifiers in high dimensional feature spaces. The two methods have been empirically evaluated on three types of high-dimensional datasets: microarrays, chemoinformatics, and images. Experimental results on 34 datasets show that both the random subspace and the random projection method lead to improvements in predictive performance compared to using the standard nearest neighbor classifier, while the best method to use depends on the type of data considered; for the microarray and chemoinformatics datasets, random projection outperforms the random subspace method, while the opposite holds for the image datasets. An analysis using data complexity measures, such as attribute to instance ratio and Fisher\u00e2\u0080\u0099s discriminant ratio, provide some more detailed indications on what relative performance can be expected for specific datasets. The results also indicate that the resulting ensembles may be competitive with state-of-the-art ensemble classifiers; the nearest neighbor ensembles using random projection perform on par with random forests for the microarray and chemoinformatics datasets.", + "abstract": "The random subspace and the random projection methods are investigated and compared as techniques for forming ensembles of nearest neighbor classifiers in high dimensional feature spaces. The two methods have been empirically evaluated on three types of high-dimensional datasets: microarrays, chemoinformatics, and images. 
Experimental results on 34 datasets show that both the random subspace and the random projection method lead to improvements in predictive performance compared to using the standard nearest neighbor classifier, while the best method to use depends on the type of data considered; for the microarray and chemoinformatics datasets, random projection outperforms the random subspace method, while the opposite holds for the image datasets. An analysis using data complexity measures, such as attribute to instance ratio and Fisher\u2019s discriminant ratio, provide some more detailed indications on what relative performance can be expected for specific datasets. The results also indicate that the resulting ensembles may be competitive with state-of-the-art ensemble classifiers; the nearest neighbor ensembles using random projection perform on par with random forests for the microarray and chemoinformatics datasets.", "authors": [ "Sampath Deegalla", "Keerthi Walgama", "Panagiotis Papapetrou", - "Henrik Bostr\u00c3\u00b6m" + "Henrik Bostr\u00f6m" ], "author_info": [ { @@ -38,7 +38,7 @@ "profile_url": "#" }, { - "name": "Henrik Bostr\u00c3\u00b6m", + "name": "Henrik Bostr\u00f6m", "profile": "#", "type": "OUTSIDER", "id": "", diff --git a/publications/v1/10.1038/s42003-020-01270-z/index.json b/publications/v1/10.1038/s42003-020-01270-z/index.json index 5505dcea..8b50c460 100644 --- a/publications/v1/10.1038/s42003-020-01270-z/index.json +++ b/publications/v1/10.1038/s42003-020-01270-z/index.json @@ -2,7 +2,7 @@ "title": "Genopo: a nanopore sequencing analysis toolkit for portable Android devices", "venue": "Communications Biology", "year": "2020", - "abstract": "The advent of portable nanopore sequencing devices has enabled DNA and RNA sequencing to be performed in the field or the clinic. However, advances in in situ genomics require parallel development of portable, offline solutions for the computational analysis of sequencing data. Here we introduce Genopo, a mobile toolkit for nanopore sequencing analysis. Genopo compacts popular bioinformatics tools to an Android application, enabling fully portable computation. To demonstrate its utility for in situ genome analysis, we use Genopo to determine the complete genome sequence of the human coronavirus SARS-CoV-2 in nine patient isolates sequenced on a nanopore device, with Genopo executing this workflow in less than 30\u00e2\u0080\u0089min per sample on a range of popular smartphones. We further show how Genopo can be used to profile DNA methylation in a human genome sample, illustrating a flexible, efficient architecture that is suitable to run many popular bioinformatics tools and accommodate small or large genomes. As the first ever smartphone application for nanopore sequencing analysis, Genopo enables the genomics community to harness this cheap, ubiquitous computational resource.", + "abstract": "The advent of portable nanopore sequencing devices has enabled DNA and RNA sequencing to be performed in the field or the clinic. However, advances in in situ genomics require parallel development of portable, offline solutions for the computational analysis of sequencing data. Here we introduce Genopo, a mobile toolkit for nanopore sequencing analysis. Genopo compacts popular bioinformatics tools to an Android application, enabling fully portable computation. 
To demonstrate its utility for in situ genome analysis, we use Genopo to determine the complete genome sequence of the human coronavirus SARS-CoV-2 in nine patient isolates sequenced on a nanopore device, with Genopo executing this workflow in less than 30\u2009min per sample on a range of popular smartphones. We further show how Genopo can be used to profile DNA methylation in a human genome sample, illustrating a flexible, efficient architecture that is suitable to run many popular bioinformatics tools and accommodate small or large genomes. As the first ever smartphone application for nanopore sequencing analysis, Genopo enables the genomics community to harness this cheap, ubiquitous computational resource.", "authors": [ "Hiruna Samarakoon", "Sanoj Punchihewa", diff --git a/publications/v1/10.1093/comjnl/bxaa055/index.json b/publications/v1/10.1093/comjnl/bxaa055/index.json index 2e44bb70..d530607d 100644 --- a/publications/v1/10.1093/comjnl/bxaa055/index.json +++ b/publications/v1/10.1093/comjnl/bxaa055/index.json @@ -2,7 +2,7 @@ "title": "Public-key encryption in the standard model against strong leakage adversary", "venue": "The Computer Journal", "year": "2020", - "abstract": "Over the years, security against adaptively chosen-ciphertext attacks (CCA2) is considered as the strongest security definition for public-key encryption schemes. With the uprise of side-channel attacks, new security definitions are proposed, addressing leakage of secret keys together with the standard CCA2 definition. Among the new security definitions, security against continuous and after-the-fact leakage-resilient CCA2 can be considered as the strongest security definition, which is called as security against (continuous) adaptively chosen-ciphertext leakage attacks (continuous CCLA2). In this paper, we present a construction of a public-key encryption scheme, namely LR-PKE, which satisfies the aforementioned security definition. The security of our public-key encryption scheme is proven in the standard model, under decision BDH assumption. Thus, we emphasize that our public-key encryption scheme LR-PKE is (continuous) CCLA2-secure in the standard model. For our construction of LR-PKE, we have used a strong one-time signature scheme and a leakage-resilient refreshing protocol as underlying building blocks. The leakage bound is 0.15nlogp\u00e2\u0088\u00921 bits per leakage query, for a security parameter k and a statistical security parameter n\u00e2\u0081\u00a0, such that logp\u00e2\u0089\u00a5k and n is a function of k\u00e2\u0081\u00a0. It is possible to see that LR-PKE is efficient enough to be used for real-world usage.", + "abstract": "Over the years, security against adaptively chosen-ciphertext attacks (CCA2) is considered as the strongest security definition for public-key encryption schemes. With the uprise of side-channel attacks, new security definitions are proposed, addressing leakage of secret keys together with the standard CCA2 definition. Among the new security definitions, security against continuous and after-the-fact leakage-resilient CCA2 can be considered as the strongest security definition, which is called as security against (continuous) adaptively chosen-ciphertext leakage attacks (continuous CCLA2). In this paper, we present a construction of a public-key encryption scheme, namely LR-PKE, which satisfies the aforementioned security definition. The security of our public-key encryption scheme is proven in the standard model, under decision BDH assumption. 
Thus, we emphasize that our public-key encryption scheme LR-PKE is (continuous) CCLA2-secure in the standard model. For our construction of LR-PKE, we have used a strong one-time signature scheme and a leakage-resilient refreshing protocol as underlying building blocks. The leakage bound is 0.15nlogp\u22121 bits per leakage query, for a security parameter k and a statistical security parameter n\u2060, such that logp\u2265k and n is a function of k\u2060. It is possible to see that LR-PKE is efficient enough to be used for real-world usage.", "authors": [ "Janaka Alawatugoda" ], diff --git a/publications/v1/10.1109/ICIAFS.2014.7069624/index.json b/publications/v1/10.1109/ICIAFS.2014.7069624/index.json index 8ebb1203..fcdd07da 100644 --- a/publications/v1/10.1109/ICIAFS.2014.7069624/index.json +++ b/publications/v1/10.1109/ICIAFS.2014.7069624/index.json @@ -1,5 +1,5 @@ { - "title": "A structured hardware software architecture for peptide based diagnosis \u00e2\u0080\u0094 Sub-string matching problem with limited tolerance", + "title": "A structured hardware software architecture for peptide based diagnosis \u2014 Sub-string matching problem with limited tolerance", "venue": "2014 7th International Conference on Information and Automation for Sustainability", "year": "2014", "abstract": "The problem of inferring proteins from complex peptide samples in shotgun proteomic workflow sets extreme demands on computational resources in respect of the required very high processing throughputs, rapid processing rates and reliability of results. This is exacerbated by the fact that, in general, a given protein cannot be defined by a fixed sequence of amino acids due to the existence of splice variants and isoforms of that protein. Therefore, the problem of protein inference could be considered as one of identifying sequences of amino acids with some limited tolerance. Two problems arise from this: a) due to these (permitted) variations, the applicability of exact string matching methodologies could be questioned and b) the difficulty of defining a reference (peptide/amino acid) sequence for a particular set of proteins that are functionally indistinguishable, but with some variation in features. This paper presents a model-based hardware acceleration of a structured and practical inference approach that is developed and validated to solve the inference problem in a mass spectrometry experiment of realistic size. Our approach starts from an examination of the known set of splice variants and isoforms of a target protein to identify the Greatest Common Stable Substring (GCSS) of amino acids and the Substrings Subjects to Limited Variation (SSLV) and their respective locations on the GCSS. The hypothesis made here is that these latter substrings (SSLV) appear inside complete peptides and not cutting across peptide boundaries. Then we define and solve the Sub-string Matching Problem with Limited Tolerance (SMPLT) using the Bit-Split Aho Corasick Algorithm with Limited Tolerance (BSACLT) that we define and automate. This approach is validated on identified peptides in a labelled and clustered data set from UNIPROT. A model-based hardware software co-design strategy is used to accelerate the computational workflow of above described protein inference problem. Identification of Baylisascaris Procyonis infection was used as an application instance. 
This workflow can be generalised to any inexact multiple pattern matching application by replacing the patterns in a clustered and distributed environment which permits a distance between member strings to account for permitted deviations such as substitutions, insertions and deletions. The co-designed workflow achieved up to 70 times maximum speed-up compared to a similar workflow purely run on the processor used for co-design.", diff --git a/publications/v1/10.1109/ICIAFS.2016.7946528/index.json b/publications/v1/10.1109/ICIAFS.2016.7946528/index.json index e2a19e4d..94d9e9e7 100644 --- a/publications/v1/10.1109/ICIAFS.2016.7946528/index.json +++ b/publications/v1/10.1109/ICIAFS.2016.7946528/index.json @@ -2,7 +2,7 @@ "title": "Accelerating k-nn classification algorithm using graphics processing units", "venue": "2016 IEEE International Conference on Information and Automation for Sustainability (ICIAfS)", "year": "2016", - "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00c3\u0097 faster execution time compared to a CPU version.", + "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. 
The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00d7 faster execution time compared to a CPU version.", "authors": [ "S Selvaluxmiy", "TN Kumara", diff --git a/publications/v1/10.1109/ICIAfS52090.2021.9606093/index.json b/publications/v1/10.1109/ICIAfS52090.2021.9606093/index.json index 7f862232..ecef052a 100644 --- a/publications/v1/10.1109/ICIAfS52090.2021.9606093/index.json +++ b/publications/v1/10.1109/ICIAfS52090.2021.9606093/index.json @@ -1,5 +1,5 @@ { - "title": "Revealing MicroRNA Biomarkers for Alzheimer\u00e2\u0080\u0099s Disease Using Next Generation Sequencing Data", + "title": "Revealing MicroRNA Biomarkers for Alzheimer\u2019s Disease Using Next Generation Sequencing Data", "venue": "2021 10th International Conference on Information and Automation for Sustainability (ICIAfS)", "year": "2021", "abstract": "", diff --git a/publications/v1/10.1109/ICIINFS.2011.6038120/index.json b/publications/v1/10.1109/ICIINFS.2011.6038120/index.json index c70387b5..a537f896 100644 --- a/publications/v1/10.1109/ICIINFS.2011.6038120/index.json +++ b/publications/v1/10.1109/ICIINFS.2011.6038120/index.json @@ -2,7 +2,7 @@ "title": "Statechart based modeling and controller implementation of complex reactive systems", "venue": "2011 6th International Conference on Industrial and Information Systems (ICIIS)", "year": "2011", - "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u00e2\u0080\u009cTwidosuite\u00e2\u0080\u009d for different operating conditions and finally tested on the elevator system.", + "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. 
Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u201cTwidosuite\u201d for different operating conditions and finally tested on the elevator system.", "authors": [ "AC Vidanapathirana", "SD Dewasurendra", diff --git a/publications/v1/10.1109/ICIIS53135.2021.9660702/index.json b/publications/v1/10.1109/ICIIS53135.2021.9660702/index.json index 6ca7fa56..214e2d5b 100644 --- a/publications/v1/10.1109/ICIIS53135.2021.9660702/index.json +++ b/publications/v1/10.1109/ICIIS53135.2021.9660702/index.json @@ -2,7 +2,7 @@ "title": "Data Mining System for Predicting a Winning Cricket Team", "venue": "2021 IEEE 16th International Conference on Industrial and Information Systems (ICIIS)", "year": "2021", - "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u00e2\u0080\u0099 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", + "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. 
Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u2019 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", "authors": [ "Dinithi Hasanika", "Roshani Dilhara", diff --git a/publications/v1/10.1109/MERCon50084.2020.9185336/index.json b/publications/v1/10.1109/MERCon50084.2020.9185336/index.json index bcbefac8..88a16e22 100644 --- a/publications/v1/10.1109/MERCon50084.2020.9185336/index.json +++ b/publications/v1/10.1109/MERCon50084.2020.9185336/index.json @@ -1,5 +1,5 @@ { - "title": "Detection of Novel Biomarker Genes of Alzheimer\u00e2\u0080\u0099s Disease Using Gene Expression Data", + "title": "Detection of Novel Biomarker Genes of Alzheimer\u2019s Disease Using Gene Expression Data", "venue": "2020 Moratuwa Engineering Research Conference (MERCon)", "year": "2020", "abstract": "It is well recognized, that most common form of dementia is Alzheimer's disease and a successful cure or medication is not discovered. A plethora of research has been conducted to understand the underlying mechanism and the pathogenesis of the Alzheimer's disease. To explore the underlying genetic structure of the disease, gene expression data is being used by many researches and computational and statistical approaches were used to identify possible genes that are risk. In this paper, we propose a machine learning framework that can be used to identify possible bio-marker genes. Our experiments discover possible set of 14 genes, which some of them are validated by biological sources. We also present a critical analysis of the propose machine learning framework using GSE5281 gene dataset.", diff --git a/publications/v1/10.1109/MIES.2016.7780263/index.json b/publications/v1/10.1109/MIES.2016.7780263/index.json index ca1b9185..2fd99a8e 100644 --- a/publications/v1/10.1109/MIES.2016.7780263/index.json +++ b/publications/v1/10.1109/MIES.2016.7780263/index.json @@ -2,7 +2,7 @@ "title": "On implementing a client-server setting to prevent the Browser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH) attacks", "venue": "2016 Manufacturing & Industrial Engineering Symposium (MIES)", "year": "2016", - "abstract": "Compression is desirable for network applications as it saves bandwidth. 
Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u00e2\u0080\u009cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u00e2\u0080\u009d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", + "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u201cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u201d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. 
The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", "authors": [ "Isuru Sankalpa", "Tharindu Dhanushka", diff --git a/publications/v1/10.1109/TCAD.2015.2445736/index.json b/publications/v1/10.1109/TCAD.2015.2445736/index.json index c6d07f32..09702673 100644 --- a/publications/v1/10.1109/TCAD.2015.2445736/index.json +++ b/publications/v1/10.1109/TCAD.2015.2445736/index.json @@ -2,7 +2,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00c3\u0097 higher in our method, which is still a mere 3.6 \u00c3\u0097 10 -5 % of the entire design space, and took 6.08 h.", + "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. 
Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10 -5 % of the entire design space, and took 6.08 h.", "authors": [ "Isuru Nawinne", "Haris Javaid", diff --git a/publications/v1/10.1145/3412382.3458269/index.json b/publications/v1/10.1145/3412382.3458269/index.json index e64df6e9..bf038ce0 100644 --- a/publications/v1/10.1145/3412382.3458269/index.json +++ b/publications/v1/10.1145/3412382.3458269/index.json @@ -2,7 +2,7 @@ "title": "DeepLight: Robust & Unobtrusive Real-time Screen-Camera Communication for Real-World Displays", "venue": "2021 20th ACM/IEEE International Conference on Information Processing in Sensor Networks (IPSN)", "year": "2021", - "abstract": "The paper introduces a novel, holistic approach for robust Screen-Camera Communication (SCC), where video content on a screen is visually encoded in a human-imperceptible fashion and decoded by a camera capturing images of such screen content. We first show that state-of-the-art SCC techniques have two key limitations for in-the-wild deployment: (a) the decoding accuracy drops rapidly under even modest screen extraction errors from the captured images, and (b) they generate perceptible flickers on common refresh rate screens even with minimal modulation of pixel intensity. To overcome these challenges, we introduce DeepLight, a system that incorporates machine learning (ML) models in the decoding pipeline to achieve humanly-imperceptible, moderately high SCC rates under diverse real-world conditions. DeepLight's key innovation is the design of a Deep Neural Network (DNN) based decoder that collectively decodes all the bits spatially encoded in a display frame, without attempting to precisely isolate the pixels associated with each encoded bit. In addition, DeepLight supports imperceptible encoding by selectively modulating the intensity of only the Blue channel, and provides reasonably accurate screen extraction (IoU values \u00e2\u0089\u00a5 83%) by using state-of-the-art object detection DNN pipelines. We show that a fully functional DeepLight system is able to robustly achieve high decoding accuracy (frame error rate < 0.2) and moderately-high data goodput (\u00e2\u0089\u00a50.95 Kbps) using a human-held smartphone camera, even over larger screen-camera distances (~ 2m).", + "abstract": "The paper introduces a novel, holistic approach for robust Screen-Camera Communication (SCC), where video content on a screen is visually encoded in a human-imperceptible fashion and decoded by a camera capturing images of such screen content. We first show that state-of-the-art SCC techniques have two key limitations for in-the-wild deployment: (a) the decoding accuracy drops rapidly under even modest screen extraction errors from the captured images, and (b) they generate perceptible flickers on common refresh rate screens even with minimal modulation of pixel intensity. To overcome these challenges, we introduce DeepLight, a system that incorporates machine learning (ML) models in the decoding pipeline to achieve humanly-imperceptible, moderately high SCC rates under diverse real-world conditions. 
DeepLight's key innovation is the design of a Deep Neural Network (DNN) based decoder that collectively decodes all the bits spatially encoded in a display frame, without attempting to precisely isolate the pixels associated with each encoded bit. In addition, DeepLight supports imperceptible encoding by selectively modulating the intensity of only the Blue channel, and provides reasonably accurate screen extraction (IoU values \u2265 83%) by using state-of-the-art object detection DNN pipelines. We show that a fully functional DeepLight system is able to robustly achieve high decoding accuracy (frame error rate < 0.2) and moderately-high data goodput (\u22650.95 Kbps) using a human-held smartphone camera, even over larger screen-camera distances (~ 2m).", "authors": [ "Vu Tran", "Gihan Jayatilaka", diff --git a/publications/v1/10.1155/2019/8162475/index.json b/publications/v1/10.1155/2019/8162475/index.json index 822a127b..a7e6bb68 100644 --- a/publications/v1/10.1155/2019/8162475/index.json +++ b/publications/v1/10.1155/2019/8162475/index.json @@ -2,7 +2,7 @@ "title": "Comparison of optimization-and rule-based EMS for domestic PV-Battery installation with time-Varying local SoC limits", "venue": "Journal of Electrical and Computer Engineering", "year": "2019", - "abstract": "Renewable energy is identified as a solution for the growing future electricity demand. Photovoltaic (PV) is a leading type of renewable energy source used for electricity generation. Among the PV systems, distributed PV systems are becoming popular among the domestic consumers and hence the number of domestic PV installations is on the rise continuously. Intermittent output power variations and inability to use the PV power during the night peak hours are major issues with PV systems. Energy storage is a possible mitigation technique for these issues. In order to effectively utilize local generations, storage, and loads, energy management system (EMS) becomes an essential component in future domestic PV installations. EMS for domestic consumers needs to be inexpensive, while a reasonable accuracy level is maintained. In this paper, optimization problem-based EMS and rule-based EMS were developed and compared to investigate the accuracy and the processing speed, thereby to select a fast and accurate EMS for a domestic PV installation. Furthermore, in the proposed EMS, a day-ahead generation and load profiles are generated from predictions, and thus the battery\u00e2\u0080\u0099s state of charge (SoC) levels over a day is estimated through the EMS. In order to utilize the storage effectively, time-varying local maximum and minimum SoC limits for the battery are introduced, which are inside the global maximum and minimum SoC limits. With the aid of real-PV profiles and typical loading profiles, the EMS was implemented using optimization- and rule-based techniques with local SoC limits. The results verified that the rule-based EMS produced accurate results in comparison to optimization-based EMS with lesser processing time. Further results verified that the introduction of local SoC limits improved the performance of the EMS in the unforeseen conditions.", + "abstract": "Renewable energy is identified as a solution for the growing future electricity demand. Photovoltaic (PV) is a leading type of renewable energy source used for electricity generation. 
Among the PV systems, distributed PV systems are becoming popular among the domestic consumers and hence the number of domestic PV installations is on the rise continuously. Intermittent output power variations and inability to use the PV power during the night peak hours are major issues with PV systems. Energy storage is a possible mitigation technique for these issues. In order to effectively utilize local generations, storage, and loads, energy management system (EMS) becomes an essential component in future domestic PV installations. EMS for domestic consumers needs to be inexpensive, while a reasonable accuracy level is maintained. In this paper, optimization problem-based EMS and rule-based EMS were developed and compared to investigate the accuracy and the processing speed, thereby to select a fast and accurate EMS for a domestic PV installation. Furthermore, in the proposed EMS, a day-ahead generation and load profiles are generated from predictions, and thus the battery\u2019s state of charge (SoC) levels over a day is estimated through the EMS. In order to utilize the storage effectively, time-varying local maximum and minimum SoC limits for the battery are introduced, which are inside the global maximum and minimum SoC limits. With the aid of real-PV profiles and typical loading profiles, the EMS was implemented using optimization- and rule-based techniques with local SoC limits. The results verified that the rule-based EMS produced accurate results in comparison to optimization-based EMS with lesser processing time. Further results verified that the introduction of local SoC limits improved the performance of the EMS in the unforeseen conditions.", "authors": [ "Akila Herath", "Supun Kodituwakku", diff --git a/publications/v1/10.1186/s12859-020-03697-x/index.json b/publications/v1/10.1186/s12859-020-03697-x/index.json index 8b5eeb85..17f4293e 100644 --- a/publications/v1/10.1186/s12859-020-03697-x/index.json +++ b/publications/v1/10.1186/s12859-020-03697-x/index.json @@ -2,7 +2,7 @@ "title": "GPU accelerated adaptive banded event alignment for rapid comparative nanopore signal analysis", "venue": "BMC Bioinformatics", "year": "2020", - "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u00e2\u0088\u00bc3-5 \u00c3\u0097 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). 
The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", + "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u223c3-5 \u00d7 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", "authors": [ "Hasindu Gamaarachchi", "Chun Wai Lam", diff --git a/publications/v1/10.1186/s41182-021-00325-z/index.json b/publications/v1/10.1186/s41182-021-00325-z/index.json index 8ff794e3..e817fe58 100644 --- a/publications/v1/10.1186/s41182-021-00325-z/index.json +++ b/publications/v1/10.1186/s41182-021-00325-z/index.json @@ -1,8 +1,8 @@ { - "title": "The correlation between three teleconnections and leptospirosis incidence in the Kandy District, Sri Lanka, 2004\u00e2\u0080\u00932019", + "title": "The correlation between three teleconnections and leptospirosis incidence in the Kandy District, Sri Lanka, 2004\u20132019", "venue": "Tropical medicine and health", "year": "2021", - "abstract": "[Background] Leptospirosis is a bacterial zoonosis. Leptospirosis incidence (LI) in Sri Lanka is high. Infected animals excrete leptospires into the environment via their urine. Survival of leptospires in the environment until they enter into a person and several other factors that influence leptospirosis transmission are dependent upon local weather. Past studies show that rainfall and other weather parameters are correlated with the LI in the Kandy district, Sri Lanka. El Ni\u00c3\u00b1o Southern Oscillation (ENSO), ENSO Modoki, and the Indian Ocean Dipole (IOD) are teleconnections known to be modulating rainfall in Sri Lanka. There is a severe dearth of published studies on the correlations between indices of these teleconnections and LI. [Methods] We acquired the counts of leptospirosis cases notified and midyear estimated population data of the Kandy district from 2004 to 2019, respectively, from weekly epidemiology reports of the Ministry of Health and Department of Census and Statistics of Sri Lanka. We estimated weekly and monthly LI of Kandy. We obtained weekly and monthly teleconnection indices data for the same period from the National Oceanic and Atmospheric Administration (NOAA) of the USA and Japan Agency for Marine-Earth Science and Technology (JAMSTEC). 
We performed wavelet time series analysis to determine correlations with lag periods between teleconnection indices and LI time series. Then, we did time-lagged detrended cross-correlation analysis (DCCA) to verify wavelet analysis results and to find the magnitudes of the correlations detected. [Results] Wavelet analysis displayed indices of ENSO, IOD, and ENSO Modoki were correlated with the LI of Kandy with 1.9\u00e2\u0080\u009311.5-month lags. Indices of ENSO showed two correlation patterns with Kandy LI. Time-lagged DCCA results show all indices of the three teleconnections studied were significantly correlated with the LI of Kandy with 2\u00e2\u0080\u00935-month lag periods. [Conclusions] Results of the two analysis methods generally agree indicating that ENSO and IOD modulate LI in Kandy by modulating local rainfall and probably other weather parameters. We recommend further studies about the ENSO Modoki and LI correlation in Sri Lanka. Monitoring for extreme teleconnection events and enhancing preventive measures during lag periods can blunt LI peaks that may follow.", + "abstract": "[Background] Leptospirosis is a bacterial zoonosis. Leptospirosis incidence (LI) in Sri Lanka is high. Infected animals excrete leptospires into the environment via their urine. Survival of leptospires in the environment until they enter into a person and several other factors that influence leptospirosis transmission are dependent upon local weather. Past studies show that rainfall and other weather parameters are correlated with the LI in the Kandy district, Sri Lanka. El Ni\u00f1o Southern Oscillation (ENSO), ENSO Modoki, and the Indian Ocean Dipole (IOD) are teleconnections known to be modulating rainfall in Sri Lanka. There is a severe dearth of published studies on the correlations between indices of these teleconnections and LI. [Methods] We acquired the counts of leptospirosis cases notified and midyear estimated population data of the Kandy district from 2004 to 2019, respectively, from weekly epidemiology reports of the Ministry of Health and Department of Census and Statistics of Sri Lanka. We estimated weekly and monthly LI of Kandy. We obtained weekly and monthly teleconnection indices data for the same period from the National Oceanic and Atmospheric Administration (NOAA) of the USA and Japan Agency for Marine-Earth Science and Technology (JAMSTEC). We performed wavelet time series analysis to determine correlations with lag periods between teleconnection indices and LI time series. Then, we did time-lagged detrended cross-correlation analysis (DCCA) to verify wavelet analysis results and to find the magnitudes of the correlations detected. [Results] Wavelet analysis displayed indices of ENSO, IOD, and ENSO Modoki were correlated with the LI of Kandy with 1.9\u201311.5-month lags. Indices of ENSO showed two correlation patterns with Kandy LI. Time-lagged DCCA results show all indices of the three teleconnections studied were significantly correlated with the LI of Kandy with 2\u20135-month lag periods. [Conclusions] Results of the two analysis methods generally agree indicating that ENSO and IOD modulate LI in Kandy by modulating local rainfall and probably other weather parameters. We recommend further studies about the ENSO Modoki and LI correlation in Sri Lanka. 
Monitoring for extreme teleconnection events and enhancing preventive measures during lag periods can blunt LI peaks that may follow.", "authors": [ "NDB Ehelepola", "Kusalika Ariyaratne", diff --git a/publications/v1/10.1371/journal.pntd.0009756/index.json b/publications/v1/10.1371/journal.pntd.0009756/index.json index b61223e3..c445b3df 100644 --- a/publications/v1/10.1371/journal.pntd.0009756/index.json +++ b/publications/v1/10.1371/journal.pntd.0009756/index.json @@ -2,7 +2,7 @@ "title": "An accurate mathematical model predicting number of dengue cases in tropics", "venue": "PLoS Neglected Tropical Diseases", "year": "2021", - "abstract": "Dengue fever is a systemic viral infection of epidemic proportions in tropical countries. The incidence of dengue fever is ever increasing and has doubled over the last few decades. Estimated 50million new cases are detected each year and close to 10000 deaths occur each year. Epidemics are unpredictable and unprecedented. When epidemics occur, health services are over whelmed leading to overcrowding of hospitals. At present there is no evidence that dengue epidemics can be predicted. Since the breeding of the dengue mosquito is directly influenced by environmental factors, it is plausible that epidemics could be predicted using weather data. We hypothesized that there is a mathematical relationship between incidence of dengue fever and environmental factors and if such relationship exists, new cases of dengue fever in the succeeding months can be predicted using weather data of the current month. We developed a mathematical model using machine learning technique. We used Island wide dengue epidemiology data, weather data and population density in developing the model. We used incidence of dengue fever, average rain fall, humidity, wind speed, temperature and population density of each district in the model. We found that the model is able to predict the incidence of dengue fever of a given month in a given district with precision (RMSE between 18- 35.3). Further, using weather data of a given month, the number of cases of dengue in succeeding months too can be predicted with precision (RMSE 10.4\u00e2\u0080\u009430). Health authorities can use existing weather data in predicting epidemics in the immediate future and therefore measures to prevent new cases can be taken and more importantly the authorities can prepare local authorities for outbreaks.", + "abstract": "Dengue fever is a systemic viral infection of epidemic proportions in tropical countries. The incidence of dengue fever is ever increasing and has doubled over the last few decades. Estimated 50million new cases are detected each year and close to 10000 deaths occur each year. Epidemics are unpredictable and unprecedented. When epidemics occur, health services are over whelmed leading to overcrowding of hospitals. At present there is no evidence that dengue epidemics can be predicted. Since the breeding of the dengue mosquito is directly influenced by environmental factors, it is plausible that epidemics could be predicted using weather data. We hypothesized that there is a mathematical relationship between incidence of dengue fever and environmental factors and if such relationship exists, new cases of dengue fever in the succeeding months can be predicted using weather data of the current month. We developed a mathematical model using machine learning technique. We used Island wide dengue epidemiology data, weather data and population density in developing the model. 
We used incidence of dengue fever, average rain fall, humidity, wind speed, temperature and population density of each district in the model. We found that the model is able to predict the incidence of dengue fever of a given month in a given district with precision (RMSE between 18- 35.3). Further, using weather data of a given month, the number of cases of dengue in succeeding months too can be predicted with precision (RMSE 10.4\u201430). Health authorities can use existing weather data in predicting epidemics in the immediate future and therefore measures to prevent new cases can be taken and more importantly the authorities can prepare local authorities for outbreaks.", "authors": [ "Chathurangi Edussuriya", "Sampath Deegalla", diff --git a/publications/v1/10.1371/journal.pone.0278440/index.json b/publications/v1/10.1371/journal.pone.0278440/index.json index d8334cb2..d72a2b0f 100644 --- a/publications/v1/10.1371/journal.pone.0278440/index.json +++ b/publications/v1/10.1371/journal.pone.0278440/index.json @@ -93,7 +93,7 @@ "Control Systems", "Agriculture" ], - "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u00e2\u0080\u0093 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", + "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u2013 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", "api_url": "https://api.ce.pdn.ac.lk/publications/v1/10.1371/journal.pone.0278440/", "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1371/journal.pone.0278440/index.json", "submitted": "2023/06/01 10:14:54" diff --git a/publications/v1/10.1515/jmc-2019-0014/index.json b/publications/v1/10.1515/jmc-2019-0014/index.json index 589bf3b0..7b2dd7d5 100644 --- a/publications/v1/10.1515/jmc-2019-0014/index.json +++ b/publications/v1/10.1515/jmc-2019-0014/index.json @@ -2,7 +2,7 @@ "title": "New approach to practical leakage-resilient public-key cryptography", "venue": "Journal of Mathematical Cryptology", "year": "2020", - "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u00e2\u0080\u0093 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. 
Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", + "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u2013 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", "authors": [ "Suvradip Chakraborty", "Janaka Alawatugoda", diff --git a/publications/v1/10.20533/licej.2040.2589.2015.0268/index.json b/publications/v1/10.20533/licej.2040.2589.2015.0268/index.json index 5fbfd9e9..90134b49 100644 --- a/publications/v1/10.20533/licej.2040.2589.2015.0268/index.json +++ b/publications/v1/10.20533/licej.2040.2589.2015.0268/index.json @@ -2,7 +2,7 @@ "title": "Novel Way of Using Mobile Phone for an Outside Science Learning Activity", "venue": "Literacy Information and Computer Education Journal (LICEJ)", "year": "2015", - "abstract": "Present-day mobile phones combine the capabilities of communication devices, cameras and computers. Even though their educational values are well established, their widespread use is hindered by the general fears about students\u00e2\u0080\u0099 misuse of mobile phones and other barriers such as their operating cost and teachers\u00e2\u0080\u0099 inexperience of using them. 
As a solution to circumvent the barriers of using mobile phones, in this study a set of mobile phones which is connected in a private network using Wi-Fi is considered. An IT application that can be used to manage the private network was developed. A lesson was conducted outside the classroom. Both students and teachers agreed that the mobile phones in a private network supported the outside learning activity and provided a novel learning experience.", + "abstract": "Present-day mobile phones combine the capabilities of communication devices, cameras and computers. Even though their educational values are well established, their widespread use is hindered by the general fears about students\u2019 misuse of mobile phones and other barriers such as their operating cost and teachers\u2019 inexperience of using them. As a solution to circumvent the barriers of using mobile phones, in this study a set of mobile phones which is connected in a private network using Wi-Fi is considered. An IT application that can be used to manage the private network was developed. A lesson was conducted outside the classroom. Both students and teachers agreed that the mobile phones in a private network supported the outside learning activity and provided a novel learning experience.", "authors": [ "TMSSKY Ekanayake", "KB Samarakoon", diff --git a/publications/v1/10.3390/electronics9091525/index.json b/publications/v1/10.3390/electronics9091525/index.json index 535c54ad..9ba478e1 100644 --- a/publications/v1/10.3390/electronics9091525/index.json +++ b/publications/v1/10.3390/electronics9091525/index.json @@ -1,5 +1,5 @@ { - "title": "BAT\u00e2\u0080\u0094Block Analytics Tool Integrated with Blockchain Based IoT Platform", + "title": "BAT\u2014Block Analytics Tool Integrated with Blockchain Based IoT Platform", "venue": "Electronics", "year": "2020", "abstract": "The Internet of Things (IoT) is the novel paradigm of connectivity and the driving force behind state-of-the-art applications and services. However, the exponential growth of the number of IoT devices and services, their distributed nature, and scarcity of resources has increased the number of security and privacy concerns ranging from the risks of unauthorized data alterations to the potential discrimination enabled by data analytics over sensitive information. Thus, a blockchain based IoT-platform is introduced to address these issues. Built upon the tamper-proof architecture, the proposed access management mechanisms ensure the authenticity and integrity of data. Moreover, a novel approach called Block Analytics Tool (BAT), integrated with the platform is proposed to analyze and make predictions on data stored on the blockchain. BAT enables the data-analysis applications to be developed using the data stored in the platform in an optimized manner acting as an interface to off-chain processing. A pharmaceutical supply chain is used as the use case scenario to show the functionality of the proposed platform. Furthermore, a model to forecast the demand of the pharmaceutical drugs is investigated using a real-world data set to demonstrate the functionality of BAT. 
Finally, the performance of BAT integrated with the platform is evaluated.", diff --git a/publications/v1/10.3991/ijim.v10i2.4817/index.json b/publications/v1/10.3991/ijim.v10i2.4817/index.json index bf426943..4c9391a7 100644 --- a/publications/v1/10.3991/ijim.v10i2.4817/index.json +++ b/publications/v1/10.3991/ijim.v10i2.4817/index.json @@ -2,7 +2,7 @@ "title": "Support of Mobile Phones in a Private Network for Science Teaching", "venue": "International Journal of Interactive Mobile Technologies", "year": "2016", - "abstract": "The potential of mobile phones to facilitate students\u00e2\u0080\u0099 science learning, when they are engaging in group activities, was investigated. To minimize the disciplinary issues emerged from the previous research on mobile devices and to enhance the quality of learning, a set of mobile phones that are connected to a private network was used. The lesson planning and implementation through these mobile phones were facilitated by a web based Application. A purposively selected group of teachers developed three lessons while integrating mobile phones in a private network into learning activities. Then the lessons were implemented in real classroom settings. This paper is based on one of the lessons \u00e2\u0080\u0098Waves and their Characteristics\u00e2\u0080\u0099 that was implemented for Grade 11 students. The data were collected through observations using audio, video and field notes and were analyzed using thematic analysis technique with the help of NVivo10 qualitative data analysis software. Based on the thematic analysis, two assertions were derived. Notably teachers appreciated the support of the private network in enhancing the quality of group learning activity while minimizing the students\u00e2\u0080\u0099 misuse of mobile phones.", + "abstract": "The potential of mobile phones to facilitate students\u2019 science learning, when they are engaging in group activities, was investigated. To minimize the disciplinary issues emerged from the previous research on mobile devices and to enhance the quality of learning, a set of mobile phones that are connected to a private network was used. The lesson planning and implementation through these mobile phones were facilitated by a web based Application. A purposively selected group of teachers developed three lessons while integrating mobile phones in a private network into learning activities. Then the lessons were implemented in real classroom settings. This paper is based on one of the lessons \u2018Waves and their Characteristics\u2019 that was implemented for Grade 11 students. The data were collected through observations using audio, video and field notes and were analyzed using thematic analysis technique with the help of NVivo10 qualitative data analysis software. Based on the thematic analysis, two assertions were derived. 
Notably teachers appreciated the support of the private network in enhancing the quality of group learning activity while minimizing the students\u2019 misuse of mobile phones.", "authors": [ "Sakunthala Yatigammana Ekanayake", "Kamalanath Samarakoon" diff --git a/publications/v1/10.7873/DATE.2013.152/index.json b/publications/v1/10.7873/DATE.2013.152/index.json index 1398d42a..412f3721 100644 --- a/publications/v1/10.7873/DATE.2013.152/index.json +++ b/publications/v1/10.7873/DATE.2013.152/index.json @@ -10,7 +10,7 @@ "Swarnalatha Radhakrishnan", "Roshan Ragel", "Jude Angelo Ambrose", - "J\u00c3\u00b6rg Henkel", + "J\u00f6rg Henkel", "Sri Parameswaran" ], "author_info": [ @@ -69,7 +69,7 @@ "profile_url": "#" }, { - "name": "J\u00c3\u00b6rg Henkel", + "name": "J\u00f6rg Henkel", "profile": "#", "type": "OUTSIDER", "id": "", diff --git a/publications/v1/all/index.json b/publications/v1/all/index.json index be0620b4..1de64863 100644 --- a/publications/v1/all/index.json +++ b/publications/v1/all/index.json @@ -359,7 +359,7 @@ "title": "Genopo: a nanopore sequencing analysis toolkit for portable Android devices", "venue": "Communications Biology", "year": "2020", - "abstract": "The advent of portable nanopore sequencing devices has enabled DNA and RNA sequencing to be performed in the field or the clinic. However, advances in in situ genomics require parallel development of portable, offline solutions for the computational analysis of sequencing data. Here we introduce Genopo, a mobile toolkit for nanopore sequencing analysis. Genopo compacts popular bioinformatics tools to an Android application, enabling fully portable computation. To demonstrate its utility for in situ genome analysis, we use Genopo to determine the complete genome sequence of the human coronavirus SARS-CoV-2 in nine patient isolates sequenced on a nanopore device, with Genopo executing this workflow in less than 30\u00e2\u0080\u0089min per sample on a range of popular smartphones. We further show how Genopo can be used to profile DNA methylation in a human genome sample, illustrating a flexible, efficient architecture that is suitable to run many popular bioinformatics tools and accommodate small or large genomes. As the first ever smartphone application for nanopore sequencing analysis, Genopo enables the genomics community to harness this cheap, ubiquitous computational resource.", + "abstract": "The advent of portable nanopore sequencing devices has enabled DNA and RNA sequencing to be performed in the field or the clinic. However, advances in in situ genomics require parallel development of portable, offline solutions for the computational analysis of sequencing data. Here we introduce Genopo, a mobile toolkit for nanopore sequencing analysis. Genopo compacts popular bioinformatics tools to an Android application, enabling fully portable computation. To demonstrate its utility for in situ genome analysis, we use Genopo to determine the complete genome sequence of the human coronavirus SARS-CoV-2 in nine patient isolates sequenced on a nanopore device, with Genopo executing this workflow in less than 30\u2009min per sample on a range of popular smartphones. We further show how Genopo can be used to profile DNA methylation in a human genome sample, illustrating a flexible, efficient architecture that is suitable to run many popular bioinformatics tools and accommodate small or large genomes. 
As the first ever smartphone application for nanopore sequencing analysis, Genopo enables the genomics community to harness this cheap, ubiquitous computational resource.", "authors": [ "Hiruna Samarakoon", "Sanoj Punchihewa", @@ -477,7 +477,7 @@ "title": "GPU accelerated adaptive banded event alignment for rapid comparative nanopore signal analysis", "venue": "BMC Bioinformatics", "year": "2020", - "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u00e2\u0088\u00bc3-5 \u00c3\u0097 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", + "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u223c3-5 \u00d7 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). 
The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", "authors": [ "Hasindu Gamaarachchi", "Chun Wai Lam", @@ -686,7 +686,7 @@ "title": "DeepLight: Robust & Unobtrusive Real-time Screen-Camera Communication for Real-World Displays", "venue": "2021 20th ACM/IEEE International Conference on Information Processing in Sensor Networks (IPSN)", "year": "2021", - "abstract": "The paper introduces a novel, holistic approach for robust Screen-Camera Communication (SCC), where video content on a screen is visually encoded in a human-imperceptible fashion and decoded by a camera capturing images of such screen content. We first show that state-of-the-art SCC techniques have two key limitations for in-the-wild deployment: (a) the decoding accuracy drops rapidly under even modest screen extraction errors from the captured images, and (b) they generate perceptible flickers on common refresh rate screens even with minimal modulation of pixel intensity. To overcome these challenges, we introduce DeepLight, a system that incorporates machine learning (ML) models in the decoding pipeline to achieve humanly-imperceptible, moderately high SCC rates under diverse real-world conditions. DeepLight's key innovation is the design of a Deep Neural Network (DNN) based decoder that collectively decodes all the bits spatially encoded in a display frame, without attempting to precisely isolate the pixels associated with each encoded bit. In addition, DeepLight supports imperceptible encoding by selectively modulating the intensity of only the Blue channel, and provides reasonably accurate screen extraction (IoU values \u00e2\u0089\u00a5 83%) by using state-of-the-art object detection DNN pipelines. We show that a fully functional DeepLight system is able to robustly achieve high decoding accuracy (frame error rate < 0.2) and moderately-high data goodput (\u00e2\u0089\u00a50.95 Kbps) using a human-held smartphone camera, even over larger screen-camera distances (~ 2m).", + "abstract": "The paper introduces a novel, holistic approach for robust Screen-Camera Communication (SCC), where video content on a screen is visually encoded in a human-imperceptible fashion and decoded by a camera capturing images of such screen content. We first show that state-of-the-art SCC techniques have two key limitations for in-the-wild deployment: (a) the decoding accuracy drops rapidly under even modest screen extraction errors from the captured images, and (b) they generate perceptible flickers on common refresh rate screens even with minimal modulation of pixel intensity. To overcome these challenges, we introduce DeepLight, a system that incorporates machine learning (ML) models in the decoding pipeline to achieve humanly-imperceptible, moderately high SCC rates under diverse real-world conditions. DeepLight's key innovation is the design of a Deep Neural Network (DNN) based decoder that collectively decodes all the bits spatially encoded in a display frame, without attempting to precisely isolate the pixels associated with each encoded bit. In addition, DeepLight supports imperceptible encoding by selectively modulating the intensity of only the Blue channel, and provides reasonably accurate screen extraction (IoU values \u2265 83%) by using state-of-the-art object detection DNN pipelines. 
We show that a fully functional DeepLight system is able to robustly achieve high decoding accuracy (frame error rate < 0.2) and moderately-high data goodput (\u22650.95 Kbps) using a human-held smartphone camera, even over larger screen-camera distances (~ 2m).", "authors": [ "Vu Tran", "Gihan Jayatilaka", @@ -1029,7 +1029,7 @@ "title": "Statechart based modeling and controller implementation of complex reactive systems", "venue": "2011 6th International Conference on Industrial and Information Systems (ICIIS)", "year": "2011", - "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u00e2\u0080\u009cTwidosuite\u00e2\u0080\u009d for different operating conditions and finally tested on the elevator system.", + "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). 
The program was first validated in simulation using Telemechanique \u201cTwidosuite\u201d for different operating conditions and finally tested on the elevator system.", "authors": [ "AC Vidanapathirana", "SD Dewasurendra", @@ -1339,7 +1339,7 @@ "submitted": "2022/06/27 03:34:23" }, { - "title": "A structured hardware software architecture for peptide based diagnosis \u00e2\u0080\u0094 Sub-string matching problem with limited tolerance", + "title": "A structured hardware software architecture for peptide based diagnosis \u2014 Sub-string matching problem with limited tolerance", "venue": "2014 7th International Conference on Information and Automation for Sustainability", "year": "2014", "abstract": "The problem of inferring proteins from complex peptide samples in shotgun proteomic workflow sets extreme demands on computational resources in respect of the required very high processing throughputs, rapid processing rates and reliability of results. This is exacerbated by the fact that, in general, a given protein cannot be defined by a fixed sequence of amino acids due to the existence of splice variants and isoforms of that protein. Therefore, the problem of protein inference could be considered as one of identifying sequences of amino acids with some limited tolerance. Two problems arise from this: a) due to these (permitted) variations, the applicability of exact string matching methodologies could be questioned and b) the difficulty of defining a reference (peptide/amino acid) sequence for a particular set of proteins that are functionally indistinguishable, but with some variation in features. This paper presents a model-based hardware acceleration of a structured and practical inference approach that is developed and validated to solve the inference problem in a mass spectrometry experiment of realistic size. Our approach starts from an examination of the known set of splice variants and isoforms of a target protein to identify the Greatest Common Stable Substring (GCSS) of amino acids and the Substrings Subjects to Limited Variation (SSLV) and their respective locations on the GCSS. The hypothesis made here is that these latter substrings (SSLV) appear inside complete peptides and not cutting across peptide boundaries. Then we define and solve the Sub-string Matching Problem with Limited Tolerance (SMPLT) using the Bit-Split Aho Corasick Algorithm with Limited Tolerance (BSACLT) that we define and automate. This approach is validated on identified peptides in a labelled and clustered data set from UNIPROT. A model-based hardware software co-design strategy is used to accelerate the computational workflow of above described protein inference problem. Identification of Baylisascaris Procyonis infection was used as an application instance. This workflow can be generalised to any inexact multiple pattern matching application by replacing the patterns in a clustered and distributed environment which permits a distance between member strings to account for permitted deviations such as substitutions, insertions and deletions. 
The co-designed workflow achieved up to 70 times maximum speed-up compared to a similar workflow purely run on the processor used for co-design.", @@ -2185,7 +2185,7 @@ "Swarnalatha Radhakrishnan", "Roshan Ragel", "Jude Angelo Ambrose", - "J\u00c3\u00b6rg Henkel", + "J\u00f6rg Henkel", "Sri Parameswaran" ], "author_info": [ @@ -2244,7 +2244,7 @@ "profile_url": "#" }, { - "name": "J\u00c3\u00b6rg Henkel", + "name": "J\u00f6rg Henkel", "profile": "#", "type": "OUTSIDER", "id": "", @@ -2389,7 +2389,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00c3\u0097 higher in our method, which is still a mere 3.6 \u00c3\u0097 10 -5 % of the entire design space, and took 6.08 h.", + "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. 
Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10 -5 % of the entire design space, and took 6.08 h.", "authors": [ "Isuru Nawinne", "Haris Javaid", @@ -2472,7 +2472,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00c3\u0097 higher in our method, which is still a mere 3.6 \u00c3\u0097 10 -5 % of the entire design space, and took 6.08 h.", + "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. 
Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10 -5 % of the entire design space, and took 6.08 h.", "authors": [ "Isuru Nawinne", "Haris Javaid", @@ -2850,7 +2850,7 @@ "title": "Data Mining System for Predicting a Winning Cricket Team", "venue": "2021 IEEE 16th International Conference on Industrial and Information Systems (ICIIS)", "year": "2021", - "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u00e2\u0080\u0099 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", + "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u2019 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. 
For all these predictions we used data mining and machine learning techniques.", "authors": [ "Dinithi Hasanika", "Roshani Dilhara", @@ -3305,7 +3305,7 @@ "title": "Novel Way of Using Mobile Phone for an Outside Science Learning Activity", "venue": "Literacy Information and Computer Education Journal (LICEJ)", "year": "2015", - "abstract": "Present-day mobile phones combine the capabilities of communication devices, cameras and computers. Even though their educational values are well established, their widespread use is hindered by the general fears about students\u00e2\u0080\u0099 misuse of mobile phones and other barriers such as their operating cost and teachers\u00e2\u0080\u0099 inexperience of using them. As a solution to circumvent the barriers of using mobile phones, in this study a set of mobile phones which is connected in a private network using Wi-Fi is considered. An IT application that can be used to manage the private network was developed. A lesson was conducted outside the classroom. Both students and teachers agreed that the mobile phones in a private network supported the outside learning activity and provided a novel learning experience.", + "abstract": "Present-day mobile phones combine the capabilities of communication devices, cameras and computers. Even though their educational values are well established, their widespread use is hindered by the general fears about students\u2019 misuse of mobile phones and other barriers such as their operating cost and teachers\u2019 inexperience of using them. As a solution to circumvent the barriers of using mobile phones, in this study a set of mobile phones which is connected in a private network using Wi-Fi is considered. An IT application that can be used to manage the private network was developed. A lesson was conducted outside the classroom. Both students and teachers agreed that the mobile phones in a private network supported the outside learning activity and provided a novel learning experience.", "authors": [ "TMSSKY Ekanayake", "KB Samarakoon", @@ -3358,7 +3358,7 @@ "title": "Support of Mobile Phones in a Private Network for Science Teaching", "venue": "International Journal of Interactive Mobile Technologies", "year": "2016", - "abstract": "The potential of mobile phones to facilitate students\u00e2\u0080\u0099 science learning, when they are engaging in group activities, was investigated. To minimize the disciplinary issues emerged from the previous research on mobile devices and to enhance the quality of learning, a set of mobile phones that are connected to a private network was used. The lesson planning and implementation through these mobile phones were facilitated by a web based Application. A purposively selected group of teachers developed three lessons while integrating mobile phones in a private network into learning activities. Then the lessons were implemented in real classroom settings. This paper is based on one of the lessons \u00e2\u0080\u0098Waves and their Characteristics\u00e2\u0080\u0099 that was implemented for Grade 11 students. The data were collected through observations using audio, video and field notes and were analyzed using thematic analysis technique with the help of NVivo10 qualitative data analysis software. Based on the thematic analysis, two assertions were derived. 
Notably teachers appreciated the support of the private network in enhancing the quality of group learning activity while minimizing the students\u00e2\u0080\u0099 misuse of mobile phones.", + "abstract": "The potential of mobile phones to facilitate students\u2019 science learning, when they are engaging in group activities, was investigated. To minimize the disciplinary issues emerged from the previous research on mobile devices and to enhance the quality of learning, a set of mobile phones that are connected to a private network was used. The lesson planning and implementation through these mobile phones were facilitated by a web based Application. A purposively selected group of teachers developed three lessons while integrating mobile phones in a private network into learning activities. Then the lessons were implemented in real classroom settings. This paper is based on one of the lessons \u2018Waves and their Characteristics\u2019 that was implemented for Grade 11 students. The data were collected through observations using audio, video and field notes and were analyzed using thematic analysis technique with the help of NVivo10 qualitative data analysis software. Based on the thematic analysis, two assertions were derived. Notably teachers appreciated the support of the private network in enhancing the quality of group learning activity while minimizing the students\u2019 misuse of mobile phones.", "authors": [ "Sakunthala Yatigammana Ekanayake", "Kamalanath Samarakoon" @@ -3485,7 +3485,7 @@ "title": "Comparison of optimization-and rule-based EMS for domestic PV-Battery installation with time-Varying local SoC limits", "venue": "Journal of Electrical and Computer Engineering", "year": "2019", - "abstract": "Renewable energy is identified as a solution for the growing future electricity demand. Photovoltaic (PV) is a leading type of renewable energy source used for electricity generation. Among the PV systems, distributed PV systems are becoming popular among the domestic consumers and hence the number of domestic PV installations is on the rise continuously. Intermittent output power variations and inability to use the PV power during the night peak hours are major issues with PV systems. Energy storage is a possible mitigation technique for these issues. In order to effectively utilize local generations, storage, and loads, energy management system (EMS) becomes an essential component in future domestic PV installations. EMS for domestic consumers needs to be inexpensive, while a reasonable accuracy level is maintained. In this paper, optimization problem-based EMS and rule-based EMS were developed and compared to investigate the accuracy and the processing speed, thereby to select a fast and accurate EMS for a domestic PV installation. Furthermore, in the proposed EMS, a day-ahead generation and load profiles are generated from predictions, and thus the battery\u00e2\u0080\u0099s state of charge (SoC) levels over a day is estimated through the EMS. In order to utilize the storage effectively, time-varying local maximum and minimum SoC limits for the battery are introduced, which are inside the global maximum and minimum SoC limits. With the aid of real-PV profiles and typical loading profiles, the EMS was implemented using optimization- and rule-based techniques with local SoC limits. The results verified that the rule-based EMS produced accurate results in comparison to optimization-based EMS with lesser processing time. 
Further results verified that the introduction of local SoC limits improved the performance of the EMS in the unforeseen conditions.", + "abstract": "Renewable energy is identified as a solution for the growing future electricity demand. Photovoltaic (PV) is a leading type of renewable energy source used for electricity generation. Among the PV systems, distributed PV systems are becoming popular among the domestic consumers and hence the number of domestic PV installations is on the rise continuously. Intermittent output power variations and inability to use the PV power during the night peak hours are major issues with PV systems. Energy storage is a possible mitigation technique for these issues. In order to effectively utilize local generations, storage, and loads, energy management system (EMS) becomes an essential component in future domestic PV installations. EMS for domestic consumers needs to be inexpensive, while a reasonable accuracy level is maintained. In this paper, optimization problem-based EMS and rule-based EMS were developed and compared to investigate the accuracy and the processing speed, thereby to select a fast and accurate EMS for a domestic PV installation. Furthermore, in the proposed EMS, a day-ahead generation and load profiles are generated from predictions, and thus the battery\u2019s state of charge (SoC) levels over a day is estimated through the EMS. In order to utilize the storage effectively, time-varying local maximum and minimum SoC limits for the battery are introduced, which are inside the global maximum and minimum SoC limits. With the aid of real-PV profiles and typical loading profiles, the EMS was implemented using optimization- and rule-based techniques with local SoC limits. The results verified that the rule-based EMS produced accurate results in comparison to optimization-based EMS with lesser processing time. Further results verified that the introduction of local SoC limits improved the performance of the EMS in the unforeseen conditions.", "authors": [ "Akila Herath", "Supun Kodituwakku", @@ -3565,10 +3565,10 @@ "submitted": "2022/07/27 08:25:55" }, { - "title": "The correlation between three teleconnections and leptospirosis incidence in the Kandy District, Sri Lanka, 2004\u00e2\u0080\u00932019", + "title": "The correlation between three teleconnections and leptospirosis incidence in the Kandy District, Sri Lanka, 2004\u20132019", "venue": "Tropical medicine and health", "year": "2021", - "abstract": "[Background] Leptospirosis is a bacterial zoonosis. Leptospirosis incidence (LI) in Sri Lanka is high. Infected animals excrete leptospires into the environment via their urine. Survival of leptospires in the environment until they enter into a person and several other factors that influence leptospirosis transmission are dependent upon local weather. Past studies show that rainfall and other weather parameters are correlated with the LI in the Kandy district, Sri Lanka. El Ni\u00c3\u00b1o Southern Oscillation (ENSO), ENSO Modoki, and the Indian Ocean Dipole (IOD) are teleconnections known to be modulating rainfall in Sri Lanka. There is a severe dearth of published studies on the correlations between indices of these teleconnections and LI. [Methods] We acquired the counts of leptospirosis cases notified and midyear estimated population data of the Kandy district from 2004 to 2019, respectively, from weekly epidemiology reports of the Ministry of Health and Department of Census and Statistics of Sri Lanka. 
We estimated weekly and monthly LI of Kandy. We obtained weekly and monthly teleconnection indices data for the same period from the National Oceanic and Atmospheric Administration (NOAA) of the USA and Japan Agency for Marine-Earth Science and Technology (JAMSTEC). We performed wavelet time series analysis to determine correlations with lag periods between teleconnection indices and LI time series. Then, we did time-lagged detrended cross-correlation analysis (DCCA) to verify wavelet analysis results and to find the magnitudes of the correlations detected. [Results] Wavelet analysis displayed indices of ENSO, IOD, and ENSO Modoki were correlated with the LI of Kandy with 1.9\u00e2\u0080\u009311.5-month lags. Indices of ENSO showed two correlation patterns with Kandy LI. Time-lagged DCCA results show all indices of the three teleconnections studied were significantly correlated with the LI of Kandy with 2\u00e2\u0080\u00935-month lag periods. [Conclusions] Results of the two analysis methods generally agree indicating that ENSO and IOD modulate LI in Kandy by modulating local rainfall and probably other weather parameters. We recommend further studies about the ENSO Modoki and LI correlation in Sri Lanka. Monitoring for extreme teleconnection events and enhancing preventive measures during lag periods can blunt LI peaks that may follow.", + "abstract": "[Background] Leptospirosis is a bacterial zoonosis. Leptospirosis incidence (LI) in Sri Lanka is high. Infected animals excrete leptospires into the environment via their urine. Survival of leptospires in the environment until they enter into a person and several other factors that influence leptospirosis transmission are dependent upon local weather. Past studies show that rainfall and other weather parameters are correlated with the LI in the Kandy district, Sri Lanka. El Ni\u00f1o Southern Oscillation (ENSO), ENSO Modoki, and the Indian Ocean Dipole (IOD) are teleconnections known to be modulating rainfall in Sri Lanka. There is a severe dearth of published studies on the correlations between indices of these teleconnections and LI. [Methods] We acquired the counts of leptospirosis cases notified and midyear estimated population data of the Kandy district from 2004 to 2019, respectively, from weekly epidemiology reports of the Ministry of Health and Department of Census and Statistics of Sri Lanka. We estimated weekly and monthly LI of Kandy. We obtained weekly and monthly teleconnection indices data for the same period from the National Oceanic and Atmospheric Administration (NOAA) of the USA and Japan Agency for Marine-Earth Science and Technology (JAMSTEC). We performed wavelet time series analysis to determine correlations with lag periods between teleconnection indices and LI time series. Then, we did time-lagged detrended cross-correlation analysis (DCCA) to verify wavelet analysis results and to find the magnitudes of the correlations detected. [Results] Wavelet analysis displayed indices of ENSO, IOD, and ENSO Modoki were correlated with the LI of Kandy with 1.9\u201311.5-month lags. Indices of ENSO showed two correlation patterns with Kandy LI. Time-lagged DCCA results show all indices of the three teleconnections studied were significantly correlated with the LI of Kandy with 2\u20135-month lag periods. [Conclusions] Results of the two analysis methods generally agree indicating that ENSO and IOD modulate LI in Kandy by modulating local rainfall and probably other weather parameters. 
We recommend further studies about the ENSO Modoki and LI correlation in Sri Lanka. Monitoring for extreme teleconnection events and enhancing preventive measures during lag periods can blunt LI peaks that may follow.", "authors": [ "NDB Ehelepola", "Kusalika Ariyaratne", @@ -3638,7 +3638,7 @@ "submitted": "2022/07/27 08:29:32" }, { - "title": "BAT\u00e2\u0080\u0094Block Analytics Tool Integrated with Blockchain Based IoT Platform", + "title": "BAT\u2014Block Analytics Tool Integrated with Blockchain Based IoT Platform", "venue": "Electronics", "year": "2020", "abstract": "The Internet of Things (IoT) is the novel paradigm of connectivity and the driving force behind state-of-the-art applications and services. However, the exponential growth of the number of IoT devices and services, their distributed nature, and scarcity of resources has increased the number of security and privacy concerns ranging from the risks of unauthorized data alterations to the potential discrimination enabled by data analytics over sensitive information. Thus, a blockchain based IoT-platform is introduced to address these issues. Built upon the tamper-proof architecture, the proposed access management mechanisms ensure the authenticity and integrity of data. Moreover, a novel approach called Block Analytics Tool (BAT), integrated with the platform is proposed to analyze and make predictions on data stored on the blockchain. BAT enables the data-analysis applications to be developed using the data stored in the platform in an optimized manner acting as an interface to off-chain processing. A pharmaceutical supply chain is used as the use case scenario to show the functionality of the proposed platform. Furthermore, a model to forecast the demand of the pharmaceutical drugs is investigated using a real-world data set to demonstrate the functionality of BAT. Finally, the performance of BAT integrated with the platform is evaluated.", @@ -4134,7 +4134,7 @@ "title": "On implementing a client-server setting to prevent the Browser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH) attacks", "venue": "2016 Manufacturing & Industrial Engineering Symposium (MIES)", "year": "2016", - "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u00e2\u0080\u009cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u00e2\u0080\u009d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. 
The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", + "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u201cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u201d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", "authors": [ "Isuru Sankalpa", "Tharindu Dhanushka", @@ -4580,7 +4580,7 @@ "title": "New approach to practical leakage-resilient public-key cryptography", "venue": "Journal of Mathematical Cryptology", "year": "2020", - "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u00e2\u0080\u0093 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. 
Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", + "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u2013 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", "authors": [ "Suvradip Chakraborty", "Janaka Alawatugoda", @@ -4641,7 +4641,7 @@ "title": "Public-key encryption in the standard model against strong leakage adversary", "venue": "The Computer Journal", "year": "2020", - "abstract": "Over the years, security against adaptively chosen-ciphertext attacks (CCA2) is considered as the strongest security definition for public-key encryption schemes. With the uprise of side-channel attacks, new security definitions are proposed, addressing leakage of secret keys together with the standard CCA2 definition. Among the new security definitions, security against continuous and after-the-fact leakage-resilient CCA2 can be considered as the strongest security definition, which is called as security against (continuous) adaptively chosen-ciphertext leakage attacks (continuous CCLA2). In this paper, we present a construction of a public-key encryption scheme, namely LR-PKE, which satisfies the aforementioned security definition. 
The security of our public-key encryption scheme is proven in the standard model, under decision BDH assumption. Thus, we emphasize that our public-key encryption scheme LR-PKE is (continuous) CCLA2-secure in the standard model. For our construction of LR-PKE, we have used a strong one-time signature scheme and a leakage-resilient refreshing protocol as underlying building blocks. The leakage bound is 0.15nlogp\u00e2\u0088\u00921 bits per leakage query, for a security parameter k and a statistical security parameter n\u00e2\u0081\u00a0, such that logp\u00e2\u0089\u00a5k and n is a function of k\u00e2\u0081\u00a0. It is possible to see that LR-PKE is efficient enough to be used for real-world usage.", + "abstract": "Over the years, security against adaptively chosen-ciphertext attacks (CCA2) is considered as the strongest security definition for public-key encryption schemes. With the uprise of side-channel attacks, new security definitions are proposed, addressing leakage of secret keys together with the standard CCA2 definition. Among the new security definitions, security against continuous and after-the-fact leakage-resilient CCA2 can be considered as the strongest security definition, which is called as security against (continuous) adaptively chosen-ciphertext leakage attacks (continuous CCLA2). In this paper, we present a construction of a public-key encryption scheme, namely LR-PKE, which satisfies the aforementioned security definition. The security of our public-key encryption scheme is proven in the standard model, under decision BDH assumption. Thus, we emphasize that our public-key encryption scheme LR-PKE is (continuous) CCLA2-secure in the standard model. For our construction of LR-PKE, we have used a strong one-time signature scheme and a leakage-resilient refreshing protocol as underlying building blocks. The leakage bound is 0.15nlogp\u22121 bits per leakage query, for a security parameter k and a statistical security parameter n\u2060, such that logp\u2265k and n is a function of k\u2060. It is possible to see that LR-PKE is efficient enough to be used for real-world usage.", "authors": [ "Janaka Alawatugoda" ], @@ -4760,7 +4760,7 @@ "title": "Standard model leakage-resilient authenticated key exchange using inner-product extractors", "venue": "Designs, Codes and Cryptography", "year": "2022", - "abstract": "With the development of side-channel attacks, a necessity arises to invent authenticated key exchange protocols in a leakage-resilient manner. Constructing authenticated key exchange protocols using existing cryptographic schemes is an effective method, as such construction can be instantiated with any appropriate scheme in a way that the formal security argument remains valid. In parallel, constructing authenticated key exchange protocols that are proven to be secure in the standard model is more preferred as they rely on real-world assumptions. In this paper, we present a Diffie\u00e2\u0080\u0093Hellman-style construction of a leakage-resilient authenticated key exchange protocol, that can be instantiated with any CCLA2-secure public-key encryption scheme and a function from the pseudo-random function family. Our protocol is proven to be secure in the standard model assuming the hardness of the decisional Diffie\u00e2\u0080\u0093Hellman problem. 
Furthermore, it is resilient to continuous partial leakage of long-term secret keys, that happens even after the session key is established, while satisfying the security features defined by the eCK security model.", + "abstract": "With the development of side-channel attacks, a necessity arises to invent authenticated key exchange protocols in a leakage-resilient manner. Constructing authenticated key exchange protocols using existing cryptographic schemes is an effective method, as such construction can be instantiated with any appropriate scheme in a way that the formal security argument remains valid. In parallel, constructing authenticated key exchange protocols that are proven to be secure in the standard model is more preferred as they rely on real-world assumptions. In this paper, we present a Diffie\u2013Hellman-style construction of a leakage-resilient authenticated key exchange protocol, that can be instantiated with any CCLA2-secure public-key encryption scheme and a function from the pseudo-random function family. Our protocol is proven to be secure in the standard model assuming the hardness of the decisional Diffie\u2013Hellman problem. Furthermore, it is resilient to continuous partial leakage of long-term secret keys, that happens even after the session key is established, while satisfying the security features defined by the eCK security model.", "authors": [ "Janaka Alawatugoda", "Tatsuaki Okamoto" @@ -5311,7 +5311,7 @@ "title": "Accelerating k-nn classification algorithm using graphics processing units", "venue": "2016 IEEE International Conference on Information and Automation for Sustainability (ICIAfS)", "year": "2016", - "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00c3\u0097 faster execution time compared to a CPU version.", + "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00d7 faster execution time compared to a CPU version.", "authors": [ "S Selvaluxmiy", "TN Kumara", @@ -5475,7 +5475,7 @@ "title": "An accurate mathematical model predicting number of dengue cases in tropics", "venue": "PLoS Neglected Tropical Diseases", "year": "2021", - "abstract": "Dengue fever is a systemic viral infection of epidemic proportions in tropical countries. The incidence of dengue fever is ever increasing and has doubled over the last few decades. 
Estimated 50million new cases are detected each year and close to 10000 deaths occur each year. Epidemics are unpredictable and unprecedented. When epidemics occur, health services are over whelmed leading to overcrowding of hospitals. At present there is no evidence that dengue epidemics can be predicted. Since the breeding of the dengue mosquito is directly influenced by environmental factors, it is plausible that epidemics could be predicted using weather data. We hypothesized that there is a mathematical relationship between incidence of dengue fever and environmental factors and if such relationship exists, new cases of dengue fever in the succeeding months can be predicted using weather data of the current month. We developed a mathematical model using machine learning technique. We used Island wide dengue epidemiology data, weather data and population density in developing the model. We used incidence of dengue fever, average rain fall, humidity, wind speed, temperature and population density of each district in the model. We found that the model is able to predict the incidence of dengue fever of a given month in a given district with precision (RMSE between 18- 35.3). Further, using weather data of a given month, the number of cases of dengue in succeeding months too can be predicted with precision (RMSE 10.4\u00e2\u0080\u009430). Health authorities can use existing weather data in predicting epidemics in the immediate future and therefore measures to prevent new cases can be taken and more importantly the authorities can prepare local authorities for outbreaks.", + "abstract": "Dengue fever is a systemic viral infection of epidemic proportions in tropical countries. The incidence of dengue fever is ever increasing and has doubled over the last few decades. Estimated 50million new cases are detected each year and close to 10000 deaths occur each year. Epidemics are unpredictable and unprecedented. When epidemics occur, health services are over whelmed leading to overcrowding of hospitals. At present there is no evidence that dengue epidemics can be predicted. Since the breeding of the dengue mosquito is directly influenced by environmental factors, it is plausible that epidemics could be predicted using weather data. We hypothesized that there is a mathematical relationship between incidence of dengue fever and environmental factors and if such relationship exists, new cases of dengue fever in the succeeding months can be predicted using weather data of the current month. We developed a mathematical model using machine learning technique. We used Island wide dengue epidemiology data, weather data and population density in developing the model. We used incidence of dengue fever, average rain fall, humidity, wind speed, temperature and population density of each district in the model. We found that the model is able to predict the incidence of dengue fever of a given month in a given district with precision (RMSE between 18- 35.3). Further, using weather data of a given month, the number of cases of dengue in succeeding months too can be predicted with precision (RMSE 10.4\u201430). 
Health authorities can use existing weather data in predicting epidemics in the immediate future and therefore measures to prevent new cases can be taken and more importantly the authorities can prepare local authorities for outbreaks.", "authors": [ "Chathurangi Edussuriya", "Sampath Deegalla", @@ -5528,12 +5528,12 @@ "title": "Random subspace and random projection nearest neighbor ensembles for high dimensional data", "venue": "Elsevier Expert systems with applications", "year": "2022", - "abstract": "The random subspace and the random projection methods are investigated and compared as techniques for forming ensembles of nearest neighbor classifiers in high dimensional feature spaces. The two methods have been empirically evaluated on three types of high-dimensional datasets: microarrays, chemoinformatics, and images. Experimental results on 34 datasets show that both the random subspace and the random projection method lead to improvements in predictive performance compared to using the standard nearest neighbor classifier, while the best method to use depends on the type of data considered; for the microarray and chemoinformatics datasets, random projection outperforms the random subspace method, while the opposite holds for the image datasets. An analysis using data complexity measures, such as attribute to instance ratio and Fisher\u00e2\u0080\u0099s discriminant ratio, provide some more detailed indications on what relative performance can be expected for specific datasets. The results also indicate that the resulting ensembles may be competitive with state-of-the-art ensemble classifiers; the nearest neighbor ensembles using random projection perform on par with random forests for the microarray and chemoinformatics datasets.", + "abstract": "The random subspace and the random projection methods are investigated and compared as techniques for forming ensembles of nearest neighbor classifiers in high dimensional feature spaces. The two methods have been empirically evaluated on three types of high-dimensional datasets: microarrays, chemoinformatics, and images. Experimental results on 34 datasets show that both the random subspace and the random projection method lead to improvements in predictive performance compared to using the standard nearest neighbor classifier, while the best method to use depends on the type of data considered; for the microarray and chemoinformatics datasets, random projection outperforms the random subspace method, while the opposite holds for the image datasets. An analysis using data complexity measures, such as attribute to instance ratio and Fisher\u2019s discriminant ratio, provide some more detailed indications on what relative performance can be expected for specific datasets. 
The results also indicate that the resulting ensembles may be competitive with state-of-the-art ensemble classifiers; the nearest neighbor ensembles using random projection perform on par with random forests for the microarray and chemoinformatics datasets.", "authors": [ "Sampath Deegalla", "Keerthi Walgama", "Panagiotis Papapetrou", - "Henrik Bostr\u00c3\u00b6m" + "Henrik Bostr\u00f6m" ], "author_info": [ { @@ -5564,7 +5564,7 @@ "profile_url": "#" }, { - "name": "Henrik Bostr\u00c3\u00b6m", + "name": "Henrik Bostr\u00f6m", "profile": "#", "type": "OUTSIDER", "id": "", @@ -5994,7 +5994,7 @@ "submitted": "2023/03/08 07:36:57" }, { - "title": "Detection of Novel Biomarker Genes of Alzheimer\u00e2\u0080\u0099s Disease Using Gene Expression Data", + "title": "Detection of Novel Biomarker Genes of Alzheimer\u2019s Disease Using Gene Expression Data", "venue": "2020 Moratuwa Engineering Research Conference (MERCon)", "year": "2020", "abstract": "It is well recognized, that most common form of dementia is Alzheimer's disease and a successful cure or medication is not discovered. A plethora of research has been conducted to understand the underlying mechanism and the pathogenesis of the Alzheimer's disease. To explore the underlying genetic structure of the disease, gene expression data is being used by many researches and computational and statistical approaches were used to identify possible genes that are risk. In this paper, we propose a machine learning framework that can be used to identify possible bio-marker genes. Our experiments discover possible set of 14 genes, which some of them are validated by biological sources. We also present a critical analysis of the propose machine learning framework using GSE5281 gene dataset.", @@ -6242,7 +6242,7 @@ "submitted": "2023/03/08 07:42:54" }, { - "title": "Revealing MicroRNA Biomarkers for Alzheimer\u00e2\u0080\u0099s Disease Using Next Generation Sequencing Data", + "title": "Revealing MicroRNA Biomarkers for Alzheimer\u2019s Disease Using Next Generation Sequencing Data", "venue": "2021 10th International Conference on Information and Automation for Sustainability (ICIAfS)", "year": "2021", "abstract": "", @@ -6829,7 +6829,7 @@ "Control Systems", "Agriculture" ], - "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u00e2\u0080\u0093 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", + "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u2013 the award agreement 4500406736 was awarded to Pradeepa C.G. 
Bandaranayake", "api_url": "https://api.ce.pdn.ac.lk/publications/v1/10.1371/journal.pone.0278440/", "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1371/journal.pone.0278440/index.json", "submitted": "2023/06/01 10:14:54" diff --git a/publications/v1/filter/research-groups/index.json b/publications/v1/filter/research-groups/index.json index 9a52ab4b..99cc8fc6 100644 --- a/publications/v1/filter/research-groups/index.json +++ b/publications/v1/filter/research-groups/index.json @@ -4,7 +4,7 @@ "title": "GPU accelerated adaptive banded event alignment for rapid comparative nanopore signal analysis", "venue": "BMC Bioinformatics", "year": "2020", - "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u00e2\u0088\u00bc3-5 \u00c3\u0097 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", + "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u223c3-5 \u00d7 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). 
The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", "authors": [ "Hasindu Gamaarachchi", "Chun Wai Lam", @@ -356,7 +356,7 @@ "title": "Genopo: a nanopore sequencing analysis toolkit for portable Android devices", "venue": "Communications Biology", "year": "2020", - "abstract": "The advent of portable nanopore sequencing devices has enabled DNA and RNA sequencing to be performed in the field or the clinic. However, advances in in situ genomics require parallel development of portable, offline solutions for the computational analysis of sequencing data. Here we introduce Genopo, a mobile toolkit for nanopore sequencing analysis. Genopo compacts popular bioinformatics tools to an Android application, enabling fully portable computation. To demonstrate its utility for in situ genome analysis, we use Genopo to determine the complete genome sequence of the human coronavirus SARS-CoV-2 in nine patient isolates sequenced on a nanopore device, with Genopo executing this workflow in less than 30\u00e2\u0080\u0089min per sample on a range of popular smartphones. We further show how Genopo can be used to profile DNA methylation in a human genome sample, illustrating a flexible, efficient architecture that is suitable to run many popular bioinformatics tools and accommodate small or large genomes. As the first ever smartphone application for nanopore sequencing analysis, Genopo enables the genomics community to harness this cheap, ubiquitous computational resource.", + "abstract": "The advent of portable nanopore sequencing devices has enabled DNA and RNA sequencing to be performed in the field or the clinic. However, advances in in situ genomics require parallel development of portable, offline solutions for the computational analysis of sequencing data. Here we introduce Genopo, a mobile toolkit for nanopore sequencing analysis. Genopo compacts popular bioinformatics tools to an Android application, enabling fully portable computation. To demonstrate its utility for in situ genome analysis, we use Genopo to determine the complete genome sequence of the human coronavirus SARS-CoV-2 in nine patient isolates sequenced on a nanopore device, with Genopo executing this workflow in less than 30\u2009min per sample on a range of popular smartphones. We further show how Genopo can be used to profile DNA methylation in a human genome sample, illustrating a flexible, efficient architecture that is suitable to run many popular bioinformatics tools and accommodate small or large genomes. As the first ever smartphone application for nanopore sequencing analysis, Genopo enables the genomics community to harness this cheap, ubiquitous computational resource.", "authors": [ "Hiruna Samarakoon", "Sanoj Punchihewa", @@ -532,7 +532,7 @@ "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1109/ICIInfS.2012.6304784/index.json" }, { - "title": "Detection of Novel Biomarker Genes of Alzheimer\u00e2\u0080\u0099s Disease Using Gene Expression Data", + "title": "Detection of Novel Biomarker Genes of Alzheimer\u2019s Disease Using Gene Expression Data", "venue": "2020 Moratuwa Engineering Research Conference (MERCon)", "year": "2020", "abstract": "It is well recognized, that most common form of dementia is Alzheimer's disease and a successful cure or medication is not discovered. A plethora of research has been conducted to understand the underlying mechanism and the pathogenesis of the Alzheimer's disease. 
To explore the underlying genetic structure of the disease, gene expression data is being used by many researches and computational and statistical approaches were used to identify possible genes that are risk. In this paper, we propose a machine learning framework that can be used to identify possible bio-marker genes. Our experiments discover possible set of 14 genes, which some of them are validated by biological sources. We also present a critical analysis of the propose machine learning framework using GSE5281 gene dataset.", @@ -777,7 +777,7 @@ "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1109/CIBCB48159.2020.9277723/index.json" }, { - "title": "Revealing MicroRNA Biomarkers for Alzheimer\u00e2\u0080\u0099s Disease Using Next Generation Sequencing Data", + "title": "Revealing MicroRNA Biomarkers for Alzheimer\u2019s Disease Using Next Generation Sequencing Data", "venue": "2021 10th International Conference on Information and Automation for Sustainability (ICIAfS)", "year": "2021", "abstract": "", @@ -1212,7 +1212,7 @@ "title": "DeepLight: Robust & Unobtrusive Real-time Screen-Camera Communication for Real-World Displays", "venue": "2021 20th ACM/IEEE International Conference on Information Processing in Sensor Networks (IPSN)", "year": "2021", - "abstract": "The paper introduces a novel, holistic approach for robust Screen-Camera Communication (SCC), where video content on a screen is visually encoded in a human-imperceptible fashion and decoded by a camera capturing images of such screen content. We first show that state-of-the-art SCC techniques have two key limitations for in-the-wild deployment: (a) the decoding accuracy drops rapidly under even modest screen extraction errors from the captured images, and (b) they generate perceptible flickers on common refresh rate screens even with minimal modulation of pixel intensity. To overcome these challenges, we introduce DeepLight, a system that incorporates machine learning (ML) models in the decoding pipeline to achieve humanly-imperceptible, moderately high SCC rates under diverse real-world conditions. DeepLight's key innovation is the design of a Deep Neural Network (DNN) based decoder that collectively decodes all the bits spatially encoded in a display frame, without attempting to precisely isolate the pixels associated with each encoded bit. In addition, DeepLight supports imperceptible encoding by selectively modulating the intensity of only the Blue channel, and provides reasonably accurate screen extraction (IoU values \u00e2\u0089\u00a5 83%) by using state-of-the-art object detection DNN pipelines. We show that a fully functional DeepLight system is able to robustly achieve high decoding accuracy (frame error rate < 0.2) and moderately-high data goodput (\u00e2\u0089\u00a50.95 Kbps) using a human-held smartphone camera, even over larger screen-camera distances (~ 2m).", + "abstract": "The paper introduces a novel, holistic approach for robust Screen-Camera Communication (SCC), where video content on a screen is visually encoded in a human-imperceptible fashion and decoded by a camera capturing images of such screen content. We first show that state-of-the-art SCC techniques have two key limitations for in-the-wild deployment: (a) the decoding accuracy drops rapidly under even modest screen extraction errors from the captured images, and (b) they generate perceptible flickers on common refresh rate screens even with minimal modulation of pixel intensity. 
To overcome these challenges, we introduce DeepLight, a system that incorporates machine learning (ML) models in the decoding pipeline to achieve humanly-imperceptible, moderately high SCC rates under diverse real-world conditions. DeepLight's key innovation is the design of a Deep Neural Network (DNN) based decoder that collectively decodes all the bits spatially encoded in a display frame, without attempting to precisely isolate the pixels associated with each encoded bit. In addition, DeepLight supports imperceptible encoding by selectively modulating the intensity of only the Blue channel, and provides reasonably accurate screen extraction (IoU values \u2265 83%) by using state-of-the-art object detection DNN pipelines. We show that a fully functional DeepLight system is able to robustly achieve high decoding accuracy (frame error rate < 0.2) and moderately-high data goodput (\u22650.95 Kbps) using a human-held smartphone camera, even over larger screen-camera distances (~ 2m).", "authors": [ "Vu Tran", "Gihan Jayatilaka", @@ -1557,7 +1557,7 @@ "Swarnalatha Radhakrishnan", "Roshan Ragel", "Jude Angelo Ambrose", - "J\u00c3\u00b6rg Henkel", + "J\u00f6rg Henkel", "Sri Parameswaran" ], "author_info": [ @@ -1616,7 +1616,7 @@ "profile_url": "#" }, { - "name": "J\u00c3\u00b6rg Henkel", + "name": "J\u00f6rg Henkel", "profile": "#", "type": "OUTSIDER", "id": "", @@ -1759,7 +1759,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00c3\u0097 higher in our method, which is still a mere 3.6 \u00c3\u0097 10 -5 % of the entire design space, and took 6.08 h.", + "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. 
Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10 -5 % of the entire design space, and took 6.08 h.", "authors": [ "Isuru Nawinne", "Haris Javaid", @@ -1841,7 +1841,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00c3\u0097 higher in our method, which is still a mere 3.6 \u00c3\u0097 10 -5 % of the entire design space, and took 6.08 h.", + "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. 
Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10 -5 % of the entire design space, and took 6.08 h.", "authors": [ "Isuru Nawinne", "Haris Javaid", @@ -2422,7 +2422,7 @@ "title": "On implementing a client-server setting to prevent the Browser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH) attacks", "venue": "2016 Manufacturing & Industrial Engineering Symposium (MIES)", "year": "2016", - "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u00e2\u0080\u009cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u00e2\u0080\u009d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", + "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. 
This side channel has led to the \u201cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u201d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", "authors": [ "Isuru Sankalpa", "Tharindu Dhanushka", @@ -2861,7 +2861,7 @@ "title": "New approach to practical leakage-resilient public-key cryptography", "venue": "Journal of Mathematical Cryptology", "year": "2020", - "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u00e2\u0080\u0093 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. 
We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", + "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u2013 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", "authors": [ "Suvradip Chakraborty", "Janaka Alawatugoda", @@ -2921,7 +2921,7 @@ "title": "Public-key encryption in the standard model against strong leakage adversary", "venue": "The Computer Journal", "year": "2020", - "abstract": "Over the years, security against adaptively chosen-ciphertext attacks (CCA2) is considered as the strongest security definition for public-key encryption schemes. With the uprise of side-channel attacks, new security definitions are proposed, addressing leakage of secret keys together with the standard CCA2 definition. Among the new security definitions, security against continuous and after-the-fact leakage-resilient CCA2 can be considered as the strongest security definition, which is called as security against (continuous) adaptively chosen-ciphertext leakage attacks (continuous CCLA2). In this paper, we present a construction of a public-key encryption scheme, namely LR-PKE, which satisfies the aforementioned security definition. The security of our public-key encryption scheme is proven in the standard model, under decision BDH assumption. Thus, we emphasize that our public-key encryption scheme LR-PKE is (continuous) CCLA2-secure in the standard model. For our construction of LR-PKE, we have used a strong one-time signature scheme and a leakage-resilient refreshing protocol as underlying building blocks. 
The leakage bound is 0.15nlogp\u00e2\u0088\u00921 bits per leakage query, for a security parameter k and a statistical security parameter n\u00e2\u0081\u00a0, such that logp\u00e2\u0089\u00a5k and n is a function of k\u00e2\u0081\u00a0. It is possible to see that LR-PKE is efficient enough to be used for real-world usage.", + "abstract": "Over the years, security against adaptively chosen-ciphertext attacks (CCA2) is considered as the strongest security definition for public-key encryption schemes. With the uprise of side-channel attacks, new security definitions are proposed, addressing leakage of secret keys together with the standard CCA2 definition. Among the new security definitions, security against continuous and after-the-fact leakage-resilient CCA2 can be considered as the strongest security definition, which is called as security against (continuous) adaptively chosen-ciphertext leakage attacks (continuous CCLA2). In this paper, we present a construction of a public-key encryption scheme, namely LR-PKE, which satisfies the aforementioned security definition. The security of our public-key encryption scheme is proven in the standard model, under decision BDH assumption. Thus, we emphasize that our public-key encryption scheme LR-PKE is (continuous) CCLA2-secure in the standard model. For our construction of LR-PKE, we have used a strong one-time signature scheme and a leakage-resilient refreshing protocol as underlying building blocks. The leakage bound is 0.15nlogp\u22121 bits per leakage query, for a security parameter k and a statistical security parameter n\u2060, such that logp\u2265k and n is a function of k\u2060. It is possible to see that LR-PKE is efficient enough to be used for real-world usage.", "authors": [ "Janaka Alawatugoda" ], @@ -3038,7 +3038,7 @@ "title": "Standard model leakage-resilient authenticated key exchange using inner-product extractors", "venue": "Designs, Codes and Cryptography", "year": "2022", - "abstract": "With the development of side-channel attacks, a necessity arises to invent authenticated key exchange protocols in a leakage-resilient manner. Constructing authenticated key exchange protocols using existing cryptographic schemes is an effective method, as such construction can be instantiated with any appropriate scheme in a way that the formal security argument remains valid. In parallel, constructing authenticated key exchange protocols that are proven to be secure in the standard model is more preferred as they rely on real-world assumptions. In this paper, we present a Diffie\u00e2\u0080\u0093Hellman-style construction of a leakage-resilient authenticated key exchange protocol, that can be instantiated with any CCLA2-secure public-key encryption scheme and a function from the pseudo-random function family. Our protocol is proven to be secure in the standard model assuming the hardness of the decisional Diffie\u00e2\u0080\u0093Hellman problem. Furthermore, it is resilient to continuous partial leakage of long-term secret keys, that happens even after the session key is established, while satisfying the security features defined by the eCK security model.", + "abstract": "With the development of side-channel attacks, a necessity arises to invent authenticated key exchange protocols in a leakage-resilient manner. 
Constructing authenticated key exchange protocols using existing cryptographic schemes is an effective method, as such construction can be instantiated with any appropriate scheme in a way that the formal security argument remains valid. In parallel, constructing authenticated key exchange protocols that are proven to be secure in the standard model is more preferred as they rely on real-world assumptions. In this paper, we present a Diffie\u2013Hellman-style construction of a leakage-resilient authenticated key exchange protocol, that can be instantiated with any CCLA2-secure public-key encryption scheme and a function from the pseudo-random function family. Our protocol is proven to be secure in the standard model assuming the hardness of the decisional Diffie\u2013Hellman problem. Furthermore, it is resilient to continuous partial leakage of long-term secret keys, that happens even after the session key is established, while satisfying the security features defined by the eCK security model.", "authors": [ "Janaka Alawatugoda", "Tatsuaki Okamoto" @@ -3175,7 +3175,7 @@ "researchgroups": [ "Smart and Intelligent Systems (IoT / AI / Wearable Computing)" ], - "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u00e2\u0080\u0093 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", + "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u2013 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", "tags": [ "IoT", "Smart Farming", diff --git a/publications/v1/filter/staff/index.json b/publications/v1/filter/staff/index.json index 9bd31516..56264af5 100644 --- a/publications/v1/filter/staff/index.json +++ b/publications/v1/filter/staff/index.json @@ -1,7 +1,7 @@ { "alawatugoda@eng.pdn.ac.lk": [ { - "title": "BAT\u00e2\u0080\u0094Block Analytics Tool Integrated with Blockchain Based IoT Platform", + "title": "BAT\u2014Block Analytics Tool Integrated with Blockchain Based IoT Platform", "venue": "Electronics", "year": "2020", "authors": [ @@ -1304,7 +1304,7 @@ "researchgroups": [ "Smart and Intelligent Systems (IoT / AI / Wearable Computing)" ], - "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u00e2\u0080\u0093 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", + "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u2013 the award agreement 4500406736 was awarded to Pradeepa C.G. 
Bandaranayake", "tags": [ "IoT", "Smart Farming", @@ -1427,7 +1427,7 @@ "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1109/ICIInfS.2012.6304784/index.json" }, { - "title": "Detection of Novel Biomarker Genes of Alzheimer\u00e2\u0080\u0099s Disease Using Gene Expression Data", + "title": "Detection of Novel Biomarker Genes of Alzheimer\u2019s Disease Using Gene Expression Data", "venue": "2020 Moratuwa Engineering Research Conference (MERCon)", "year": "2020", "authors": [ @@ -1669,7 +1669,7 @@ "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1109/CIBCB48159.2020.9277723/index.json" }, { - "title": "Revealing MicroRNA Biomarkers for Alzheimer\u00e2\u0080\u0099s Disease Using Next Generation Sequencing Data", + "title": "Revealing MicroRNA Biomarkers for Alzheimer\u2019s Disease Using Next Generation Sequencing Data", "venue": "2021 10th International Conference on Information and Automation for Sustainability (ICIAfS)", "year": "2021", "authors": [ @@ -2492,7 +2492,7 @@ "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1109/ICIInfS.2013.6732061/index.json" }, { - "title": "A structured hardware software architecture for peptide based diagnosis \u00e2\u0080\u0094 Sub-string matching problem with limited tolerance", + "title": "A structured hardware software architecture for peptide based diagnosis \u2014 Sub-string matching problem with limited tolerance", "venue": "2014 7th International Conference on Information and Automation for Sustainability", "year": "2014", "authors": [ @@ -4213,7 +4213,7 @@ "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1155/2019/8162475/index.json" }, { - "title": "The correlation between three teleconnections and leptospirosis incidence in the Kandy District, Sri Lanka, 2004\u00e2\u0080\u00932019", + "title": "The correlation between three teleconnections and leptospirosis incidence in the Kandy District, Sri Lanka, 2004\u20132019", "venue": "Tropical medicine and health", "year": "2021", "authors": [ @@ -4463,7 +4463,7 @@ "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1109/TIC-STH.2009.5444462/index.json" }, { - "title": "BAT\u00e2\u0080\u0094Block Analytics Tool Integrated with Blockchain Based IoT Platform", + "title": "BAT\u2014Block Analytics Tool Integrated with Blockchain Based IoT Platform", "venue": "Electronics", "year": "2020", "authors": [ @@ -5460,7 +5460,7 @@ "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1109/ICIInfS.2013.6732061/index.json" }, { - "title": "A structured hardware software architecture for peptide based diagnosis \u00e2\u0080\u0094 Sub-string matching problem with limited tolerance", + "title": "A structured hardware software architecture for peptide based diagnosis \u2014 Sub-string matching problem with limited tolerance", "venue": "2014 7th International Conference on Information and Automation for Sustainability", "year": "2014", "authors": [ @@ -5941,7 +5941,7 @@ "Swarnalatha Radhakrishnan", "Roshan Ragel", "Jude Angelo Ambrose", - "J\u00c3\u00b6rg Henkel", + "J\u00f6rg Henkel", "Sri Parameswaran" ], "author_info": [ @@ -6000,7 +6000,7 @@ "profile_url": "#" }, { - "name": "J\u00c3\u00b6rg Henkel", + "name": "J\u00f6rg Henkel", "profile": "#", "type": "OUTSIDER", "id": "", @@ -7824,7 +7824,7 @@ "edit_url": 
"https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1109/ICIInfS.2012.6304784/index.json" }, { - "title": "Detection of Novel Biomarker Genes of Alzheimer\u00e2\u0080\u0099s Disease Using Gene Expression Data", + "title": "Detection of Novel Biomarker Genes of Alzheimer\u2019s Disease Using Gene Expression Data", "venue": "2020 Moratuwa Engineering Research Conference (MERCon)", "year": "2020", "authors": [ @@ -7914,7 +7914,7 @@ "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1109/MERCon50084.2020.9185336/index.json" }, { - "title": "Revealing MicroRNA Biomarkers for Alzheimer\u00e2\u0080\u0099s Disease Using Next Generation Sequencing Data", + "title": "Revealing MicroRNA Biomarkers for Alzheimer\u2019s Disease Using Next Generation Sequencing Data", "venue": "2021 10th International Conference on Information and Automation for Sustainability (ICIAfS)", "year": "2021", "authors": [ @@ -9045,7 +9045,7 @@ "Sampath Deegalla", "Keerthi Walgama", "Panagiotis Papapetrou", - "Henrik Bostr\u00c3\u00b6m" + "Henrik Bostr\u00f6m" ], "author_info": [ { @@ -9076,7 +9076,7 @@ "profile_url": "#" }, { - "name": "Henrik Bostr\u00c3\u00b6m", + "name": "Henrik Bostr\u00f6m", "profile": "#", "type": "OUTSIDER", "id": "", @@ -9518,7 +9518,7 @@ "Swarnalatha Radhakrishnan", "Roshan Ragel", "Jude Angelo Ambrose", - "J\u00c3\u00b6rg Henkel", + "J\u00f6rg Henkel", "Sri Parameswaran" ], "author_info": [ @@ -9577,7 +9577,7 @@ "profile_url": "#" }, { - "name": "J\u00c3\u00b6rg Henkel", + "name": "J\u00f6rg Henkel", "profile": "#", "type": "OUTSIDER", "id": "", @@ -9880,7 +9880,7 @@ ], "upuljm@eng.pdn.ac.lk": [ { - "title": "BAT\u00e2\u0080\u0094Block Analytics Tool Integrated with Blockchain Based IoT Platform", + "title": "BAT\u2014Block Analytics Tool Integrated with Blockchain Based IoT Platform", "venue": "Electronics", "year": "2020", "authors": [ diff --git a/publications/v1/filter/students/index.json b/publications/v1/filter/students/index.json index a875d099..b56a191c 100644 --- a/publications/v1/filter/students/index.json +++ b/publications/v1/filter/students/index.json @@ -631,7 +631,7 @@ ], "E/14/054": [ { - "title": "BAT\u00e2\u0080\u0094Block Analytics Tool Integrated with Blockchain Based IoT Platform", + "title": "BAT\u2014Block Analytics Tool Integrated with Blockchain Based IoT Platform", "venue": "Electronics", "year": "2020", "authors": [ @@ -1433,7 +1433,7 @@ "researchgroups": [ "Smart and Intelligent Systems (IoT / AI / Wearable Computing)" ], - "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u00e2\u0080\u0093 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", + "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u2013 the award agreement 4500406736 was awarded to Pradeepa C.G. 
Bandaranayake", "tags": [ "IoT", "Smart Farming", @@ -2158,7 +2158,7 @@ ], "E/14/413": [ { - "title": "BAT\u00e2\u0080\u0094Block Analytics Tool Integrated with Blockchain Based IoT Platform", + "title": "BAT\u2014Block Analytics Tool Integrated with Blockchain Based IoT Platform", "venue": "Electronics", "year": "2020", "authors": [ @@ -2638,7 +2638,7 @@ "researchgroups": [ "Smart and Intelligent Systems (IoT / AI / Wearable Computing)" ], - "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u00e2\u0080\u0093 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", + "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u2013 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", "tags": [ "IoT", "Smart Farming", diff --git a/publications/v1/filter/tags/index.json b/publications/v1/filter/tags/index.json index f089187c..6e432bfc 100644 --- a/publications/v1/filter/tags/index.json +++ b/publications/v1/filter/tags/index.json @@ -486,7 +486,7 @@ "researchgroups": [ "Smart and Intelligent Systems (IoT / AI / Wearable Computing)" ], - "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u00e2\u0080\u0093 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", + "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u2013 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", "tags": [ "IoT", "Smart Farming", @@ -763,7 +763,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. 
The number of design points explored was 4\u00c3\u0097 higher in our method, which is still a mere 3.6 \u00c3\u0097 10 -5 % of the entire design space, and took 6.08 h.", + "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10 -5 % of the entire design space, and took 6.08 h.", "authors": [ "Isuru Nawinne", "Haris Javaid", @@ -845,7 +845,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. 
The number of design points explored was 4\u00c3\u0097 higher in our method, which is still a mere 3.6 \u00c3\u0097 10 -5 % of the entire design space, and took 6.08 h.", + "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10 -5 % of the entire design space, and took 6.08 h.", "authors": [ "Isuru Nawinne", "Haris Javaid", @@ -1168,7 +1168,7 @@ ], "Amino acids": [ { - "title": "A structured hardware software architecture for peptide based diagnosis \u00e2\u0080\u0094 Sub-string matching problem with limited tolerance", + "title": "A structured hardware software architecture for peptide based diagnosis \u2014 Sub-string matching problem with limited tolerance", "venue": "2014 7th International Conference on Information and Automation for Sustainability", "year": "2014", "abstract": "The problem of inferring proteins from complex peptide samples in shotgun proteomic workflow sets extreme demands on computational resources in respect of the required very high processing throughputs, rapid processing rates and reliability of results. This is exacerbated by the fact that, in general, a given protein cannot be defined by a fixed sequence of amino acids due to the existence of splice variants and isoforms of that protein. Therefore, the problem of protein inference could be considered as one of identifying sequences of amino acids with some limited tolerance. Two problems arise from this: a) due to these (permitted) variations, the applicability of exact string matching methodologies could be questioned and b) the difficulty of defining a reference (peptide/amino acid) sequence for a particular set of proteins that are functionally indistinguishable, but with some variation in features. This paper presents a model-based hardware acceleration of a structured and practical inference approach that is developed and validated to solve the inference problem in a mass spectrometry experiment of realistic size. 
Our approach starts from an examination of the known set of splice variants and isoforms of a target protein to identify the Greatest Common Stable Substring (GCSS) of amino acids and the Substrings Subjects to Limited Variation (SSLV) and their respective locations on the GCSS. The hypothesis made here is that these latter substrings (SSLV) appear inside complete peptides and not cutting across peptide boundaries. Then we define and solve the Sub-string Matching Problem with Limited Tolerance (SMPLT) using the Bit-Split Aho Corasick Algorithm with Limited Tolerance (BSACLT) that we define and automate. This approach is validated on identified peptides in a labelled and clustered data set from UNIPROT. A model-based hardware software co-design strategy is used to accelerate the computational workflow of above described protein inference problem. Identification of Baylisascaris Procyonis infection was used as an application instance. This workflow can be generalised to any inexact multiple pattern matching application by replacing the patterns in a clustered and distributed environment which permits a distance between member strings to account for permitted deviations such as substitutions, insertions and deletions. The co-designed workflow achieved up to 70 times maximum speed-up compared to a similar workflow purely run on the processor used for co-design.", @@ -1932,7 +1932,7 @@ "title": "Data Mining System for Predicting a Winning Cricket Team", "venue": "2021 IEEE 16th International Conference on Industrial and Information Systems (ICIIS)", "year": "2021", - "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u00e2\u0080\u0099 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", + "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. 
The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u2019 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", "authors": [ "Dinithi Hasanika", "Roshani Dilhara", @@ -2014,7 +2014,7 @@ "title": "Standard model leakage-resilient authenticated key exchange using inner-product extractors", "venue": "Designs, Codes and Cryptography", "year": "2022", - "abstract": "With the development of side-channel attacks, a necessity arises to invent authenticated key exchange protocols in a leakage-resilient manner. Constructing authenticated key exchange protocols using existing cryptographic schemes is an effective method, as such construction can be instantiated with any appropriate scheme in a way that the formal security argument remains valid. In parallel, constructing authenticated key exchange protocols that are proven to be secure in the standard model is more preferred as they rely on real-world assumptions. In this paper, we present a Diffie\u00e2\u0080\u0093Hellman-style construction of a leakage-resilient authenticated key exchange protocol, that can be instantiated with any CCLA2-secure public-key encryption scheme and a function from the pseudo-random function family. Our protocol is proven to be secure in the standard model assuming the hardness of the decisional Diffie\u00e2\u0080\u0093Hellman problem. Furthermore, it is resilient to continuous partial leakage of long-term secret keys, that happens even after the session key is established, while satisfying the security features defined by the eCK security model.", + "abstract": "With the development of side-channel attacks, a necessity arises to invent authenticated key exchange protocols in a leakage-resilient manner. Constructing authenticated key exchange protocols using existing cryptographic schemes is an effective method, as such construction can be instantiated with any appropriate scheme in a way that the formal security argument remains valid. In parallel, constructing authenticated key exchange protocols that are proven to be secure in the standard model is more preferred as they rely on real-world assumptions. In this paper, we present a Diffie\u2013Hellman-style construction of a leakage-resilient authenticated key exchange protocol, that can be instantiated with any CCLA2-secure public-key encryption scheme and a function from the pseudo-random function family. Our protocol is proven to be secure in the standard model assuming the hardness of the decisional Diffie\u2013Hellman problem. 
Furthermore, it is resilient to continuous partial leakage of long-term secret keys, that happens even after the session key is established, while satisfying the security features defined by the eCK security model.", "authors": [ "Janaka Alawatugoda", "Tatsuaki Okamoto" @@ -2240,7 +2240,7 @@ "title": "Statechart based modeling and controller implementation of complex reactive systems", "venue": "2011 6th International Conference on Industrial and Information Systems (ICIIS)", "year": "2011", - "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u00e2\u0080\u009cTwidosuite\u00e2\u0080\u009d for different operating conditions and finally tested on the elevator system.", + "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). 
The program was first validated in simulation using Telemechanique \u201cTwidosuite\u201d for different operating conditions and finally tested on the elevator system.", "authors": [ "AC Vidanapathirana", "SD Dewasurendra", @@ -2297,7 +2297,7 @@ "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1109/ICIINFS.2011.6038120/index.json" }, { - "title": "A structured hardware software architecture for peptide based diagnosis \u00e2\u0080\u0094 Sub-string matching problem with limited tolerance", + "title": "A structured hardware software architecture for peptide based diagnosis \u2014 Sub-string matching problem with limited tolerance", "venue": "2014 7th International Conference on Information and Automation for Sustainability", "year": "2014", "abstract": "The problem of inferring proteins from complex peptide samples in shotgun proteomic workflow sets extreme demands on computational resources in respect of the required very high processing throughputs, rapid processing rates and reliability of results. This is exacerbated by the fact that, in general, a given protein cannot be defined by a fixed sequence of amino acids due to the existence of splice variants and isoforms of that protein. Therefore, the problem of protein inference could be considered as one of identifying sequences of amino acids with some limited tolerance. Two problems arise from this: a) due to these (permitted) variations, the applicability of exact string matching methodologies could be questioned and b) the difficulty of defining a reference (peptide/amino acid) sequence for a particular set of proteins that are functionally indistinguishable, but with some variation in features. This paper presents a model-based hardware acceleration of a structured and practical inference approach that is developed and validated to solve the inference problem in a mass spectrometry experiment of realistic size. Our approach starts from an examination of the known set of splice variants and isoforms of a target protein to identify the Greatest Common Stable Substring (GCSS) of amino acids and the Substrings Subjects to Limited Variation (SSLV) and their respective locations on the GCSS. The hypothesis made here is that these latter substrings (SSLV) appear inside complete peptides and not cutting across peptide boundaries. Then we define and solve the Sub-string Matching Problem with Limited Tolerance (SMPLT) using the Bit-Split Aho Corasick Algorithm with Limited Tolerance (BSACLT) that we define and automate. This approach is validated on identified peptides in a labelled and clustered data set from UNIPROT. A model-based hardware software co-design strategy is used to accelerate the computational workflow of above described protein inference problem. Identification of Baylisascaris Procyonis infection was used as an application instance. This workflow can be generalised to any inexact multiple pattern matching application by replacing the patterns in a clustered and distributed environment which permits a distance between member strings to account for permitted deviations such as substitutions, insertions and deletions. 
The co-designed workflow achieved up to 70 times maximum speed-up compared to a similar workflow purely run on the processor used for co-design.", @@ -2446,7 +2446,7 @@ "title": "On implementing a client-server setting to prevent the Browser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH) attacks", "venue": "2016 Manufacturing & Industrial Engineering Symposium (MIES)", "year": "2016", - "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u00e2\u0080\u009cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u00e2\u0080\u009d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", + "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u201cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u201d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. 
The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", "authors": [ "Isuru Sankalpa", "Tharindu Dhanushka", @@ -3195,7 +3195,7 @@ "title": "On implementing a client-server setting to prevent the Browser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH) attacks", "venue": "2016 Manufacturing & Industrial Engineering Symposium (MIES)", "year": "2016", - "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u00e2\u0080\u009cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u00e2\u0080\u009d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", + "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u201cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u201d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. 
The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", "authors": [ "Isuru Sankalpa", "Tharindu Dhanushka", @@ -3279,7 +3279,7 @@ "title": "Standard model leakage-resilient authenticated key exchange using inner-product extractors", "venue": "Designs, Codes and Cryptography", "year": "2022", - "abstract": "With the development of side-channel attacks, a necessity arises to invent authenticated key exchange protocols in a leakage-resilient manner. Constructing authenticated key exchange protocols using existing cryptographic schemes is an effective method, as such construction can be instantiated with any appropriate scheme in a way that the formal security argument remains valid. In parallel, constructing authenticated key exchange protocols that are proven to be secure in the standard model is more preferred as they rely on real-world assumptions. In this paper, we present a Diffie\u00e2\u0080\u0093Hellman-style construction of a leakage-resilient authenticated key exchange protocol, that can be instantiated with any CCLA2-secure public-key encryption scheme and a function from the pseudo-random function family. Our protocol is proven to be secure in the standard model assuming the hardness of the decisional Diffie\u00e2\u0080\u0093Hellman problem. Furthermore, it is resilient to continuous partial leakage of long-term secret keys, that happens even after the session key is established, while satisfying the security features defined by the eCK security model.", + "abstract": "With the development of side-channel attacks, a necessity arises to invent authenticated key exchange protocols in a leakage-resilient manner. Constructing authenticated key exchange protocols using existing cryptographic schemes is an effective method, as such construction can be instantiated with any appropriate scheme in a way that the formal security argument remains valid. In parallel, constructing authenticated key exchange protocols that are proven to be secure in the standard model is more preferred as they rely on real-world assumptions. In this paper, we present a Diffie\u2013Hellman-style construction of a leakage-resilient authenticated key exchange protocol, that can be instantiated with any CCLA2-secure public-key encryption scheme and a function from the pseudo-random function family. Our protocol is proven to be secure in the standard model assuming the hardness of the decisional Diffie\u2013Hellman problem. Furthermore, it is resilient to continuous partial leakage of long-term secret keys, that happens even after the session key is established, while satisfying the security features defined by the eCK security model.", "authors": [ "Janaka Alawatugoda", "Tatsuaki Okamoto" @@ -4232,7 +4232,7 @@ "title": "On implementing a client-server setting to prevent the Browser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH) attacks", "venue": "2016 Manufacturing & Industrial Engineering Symposium (MIES)", "year": "2016", - "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. 
This side channel has led to the \u00e2\u0080\u009cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u00e2\u0080\u009d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", + "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u201cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u201d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", "authors": [ "Isuru Sankalpa", "Tharindu Dhanushka", @@ -5330,7 +5330,7 @@ "title": "DeepLight: Robust & Unobtrusive Real-time Screen-Camera Communication for Real-World Displays", "venue": "2021 20th ACM/IEEE International Conference on Information Processing in Sensor Networks (IPSN)", "year": "2021", - "abstract": "The paper introduces a novel, holistic approach for robust Screen-Camera Communication (SCC), where video content on a screen is visually encoded in a human-imperceptible fashion and decoded by a camera capturing images of such screen content. 
We first show that state-of-the-art SCC techniques have two key limitations for in-the-wild deployment: (a) the decoding accuracy drops rapidly under even modest screen extraction errors from the captured images, and (b) they generate perceptible flickers on common refresh rate screens even with minimal modulation of pixel intensity. To overcome these challenges, we introduce DeepLight, a system that incorporates machine learning (ML) models in the decoding pipeline to achieve humanly-imperceptible, moderately high SCC rates under diverse real-world conditions. DeepLight's key innovation is the design of a Deep Neural Network (DNN) based decoder that collectively decodes all the bits spatially encoded in a display frame, without attempting to precisely isolate the pixels associated with each encoded bit. In addition, DeepLight supports imperceptible encoding by selectively modulating the intensity of only the Blue channel, and provides reasonably accurate screen extraction (IoU values \u00e2\u0089\u00a5 83%) by using state-of-the-art object detection DNN pipelines. We show that a fully functional DeepLight system is able to robustly achieve high decoding accuracy (frame error rate < 0.2) and moderately-high data goodput (\u00e2\u0089\u00a50.95 Kbps) using a human-held smartphone camera, even over larger screen-camera distances (~ 2m).", + "abstract": "The paper introduces a novel, holistic approach for robust Screen-Camera Communication (SCC), where video content on a screen is visually encoded in a human-imperceptible fashion and decoded by a camera capturing images of such screen content. We first show that state-of-the-art SCC techniques have two key limitations for in-the-wild deployment: (a) the decoding accuracy drops rapidly under even modest screen extraction errors from the captured images, and (b) they generate perceptible flickers on common refresh rate screens even with minimal modulation of pixel intensity. To overcome these challenges, we introduce DeepLight, a system that incorporates machine learning (ML) models in the decoding pipeline to achieve humanly-imperceptible, moderately high SCC rates under diverse real-world conditions. DeepLight's key innovation is the design of a Deep Neural Network (DNN) based decoder that collectively decodes all the bits spatially encoded in a display frame, without attempting to precisely isolate the pixels associated with each encoded bit. In addition, DeepLight supports imperceptible encoding by selectively modulating the intensity of only the Blue channel, and provides reasonably accurate screen extraction (IoU values \u2265 83%) by using state-of-the-art object detection DNN pipelines. We show that a fully functional DeepLight system is able to robustly achieve high decoding accuracy (frame error rate < 0.2) and moderately-high data goodput (\u22650.95 Kbps) using a human-held smartphone camera, even over larger screen-camera distances (~ 2m).", "authors": [ "Vu Tran", "Gihan Jayatilaka", @@ -5780,7 +5780,7 @@ "title": "Data Mining System for Predicting a Winning Cricket Team", "venue": "2021 IEEE 16th International Conference on Industrial and Information Systems (ICIIS)", "year": "2021", - "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. 
But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u00e2\u0080\u0099 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", + "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u2019 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", "authors": [ "Dinithi Hasanika", "Roshani Dilhara", @@ -6150,7 +6150,7 @@ "researchgroups": [ "Smart and Intelligent Systems (IoT / AI / Wearable Computing)" ], - "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u00e2\u0080\u0093 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", + "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u2013 the award agreement 4500406736 was awarded to Pradeepa C.G. 
Bandaranayake", "tags": [ "IoT", "Smart Farming", @@ -6255,7 +6255,7 @@ ], "Correlation": [ { - "title": "Detection of Novel Biomarker Genes of Alzheimer\u00e2\u0080\u0099s Disease Using Gene Expression Data", + "title": "Detection of Novel Biomarker Genes of Alzheimer\u2019s Disease Using Gene Expression Data", "venue": "2020 Moratuwa Engineering Research Conference (MERCon)", "year": "2020", "abstract": "It is well recognized, that most common form of dementia is Alzheimer's disease and a successful cure or medication is not discovered. A plethora of research has been conducted to understand the underlying mechanism and the pathogenesis of the Alzheimer's disease. To explore the underlying genetic structure of the disease, gene expression data is being used by many researches and computational and statistical approaches were used to identify possible genes that are risk. In this paper, we propose a machine learning framework that can be used to identify possible bio-marker genes. Our experiments discover possible set of 14 genes, which some of them are validated by biological sources. We also present a critical analysis of the propose machine learning framework using GSE5281 gene dataset.", @@ -6407,7 +6407,7 @@ "title": "On implementing a client-server setting to prevent the Browser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH) attacks", "venue": "2016 Manufacturing & Industrial Engineering Symposium (MIES)", "year": "2016", - "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u00e2\u0080\u009cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u00e2\u0080\u009d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", + "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u201cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u201d attack on web traffic protected by the TLS protocol. 
The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", "authors": [ "Isuru Sankalpa", "Tharindu Dhanushka", @@ -7081,7 +7081,7 @@ "title": "Data Mining System for Predicting a Winning Cricket Team", "venue": "2021 IEEE 16th International Conference on Industrial and Information Systems (ICIIS)", "year": "2021", - "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u00e2\u0080\u0099 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", + "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. 
This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u2019 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", "authors": [ "Dinithi Hasanika", "Roshani Dilhara", @@ -7413,7 +7413,7 @@ ], "Databases": [ { - "title": "A structured hardware software architecture for peptide based diagnosis \u00e2\u0080\u0094 Sub-string matching problem with limited tolerance", + "title": "A structured hardware software architecture for peptide based diagnosis \u2014 Sub-string matching problem with limited tolerance", "venue": "2014 7th International Conference on Information and Automation for Sustainability", "year": "2014", "abstract": "The problem of inferring proteins from complex peptide samples in shotgun proteomic workflow sets extreme demands on computational resources in respect of the required very high processing throughputs, rapid processing rates and reliability of results. This is exacerbated by the fact that, in general, a given protein cannot be defined by a fixed sequence of amino acids due to the existence of splice variants and isoforms of that protein. Therefore, the problem of protein inference could be considered as one of identifying sequences of amino acids with some limited tolerance. Two problems arise from this: a) due to these (permitted) variations, the applicability of exact string matching methodologies could be questioned and b) the difficulty of defining a reference (peptide/amino acid) sequence for a particular set of proteins that are functionally indistinguishable, but with some variation in features. This paper presents a model-based hardware acceleration of a structured and practical inference approach that is developed and validated to solve the inference problem in a mass spectrometry experiment of realistic size. Our approach starts from an examination of the known set of splice variants and isoforms of a target protein to identify the Greatest Common Stable Substring (GCSS) of amino acids and the Substrings Subjects to Limited Variation (SSLV) and their respective locations on the GCSS. The hypothesis made here is that these latter substrings (SSLV) appear inside complete peptides and not cutting across peptide boundaries. Then we define and solve the Sub-string Matching Problem with Limited Tolerance (SMPLT) using the Bit-Split Aho Corasick Algorithm with Limited Tolerance (BSACLT) that we define and automate. This approach is validated on identified peptides in a labelled and clustered data set from UNIPROT. A model-based hardware software co-design strategy is used to accelerate the computational workflow of above described protein inference problem. Identification of Baylisascaris Procyonis infection was used as an application instance. 
This workflow can be generalised to any inexact multiple pattern matching application by replacing the patterns in a clustered and distributed environment which permits a distance between member strings to account for permitted deviations such as substitutions, insertions and deletions. The co-designed workflow achieved up to 70 times maximum speed-up compared to a similar workflow purely run on the processor used for co-design.", @@ -7778,7 +7778,7 @@ ], "Dementia": [ { - "title": "Detection of Novel Biomarker Genes of Alzheimer\u00e2\u0080\u0099s Disease Using Gene Expression Data", + "title": "Detection of Novel Biomarker Genes of Alzheimer\u2019s Disease Using Gene Expression Data", "venue": "2020 Moratuwa Engineering Research Conference (MERCon)", "year": "2020", "abstract": "It is well recognized, that most common form of dementia is Alzheimer's disease and a successful cure or medication is not discovered. A plethora of research has been conducted to understand the underlying mechanism and the pathogenesis of the Alzheimer's disease. To explore the underlying genetic structure of the disease, gene expression data is being used by many researches and computational and statistical approaches were used to identify possible genes that are risk. In this paper, we propose a machine learning framework that can be used to identify possible bio-marker genes. Our experiments discover possible set of 14 genes, which some of them are validated by biological sources. We also present a critical analysis of the propose machine learning framework using GSE5281 gene dataset.", @@ -8177,7 +8177,7 @@ "title": "On implementing a client-server setting to prevent the Browser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH) attacks", "venue": "2016 Manufacturing & Industrial Engineering Symposium (MIES)", "year": "2016", - "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u00e2\u0080\u009cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u00e2\u0080\u009d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", + "abstract": "Compression is desirable for network applications as it saves bandwidth. 
Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u201cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u201d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", "authors": [ "Isuru Sankalpa", "Tharindu Dhanushka", @@ -8764,7 +8764,7 @@ "title": "Statechart based modeling and controller implementation of complex reactive systems", "venue": "2011 6th International Conference on Industrial and Information Systems (ICIIS)", "year": "2011", - "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u00e2\u0080\u009cTwidosuite\u00e2\u0080\u009d for different operating conditions and finally tested on the elevator system.", + "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. 
Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u201cTwidosuite\u201d for different operating conditions and finally tested on the elevator system.", "authors": [ "AC Vidanapathirana", "SD Dewasurendra", @@ -8885,7 +8885,7 @@ "title": "DeepLight: Robust & Unobtrusive Real-time Screen-Camera Communication for Real-World Displays", "venue": "2021 20th ACM/IEEE International Conference on Information Processing in Sensor Networks (IPSN)", "year": "2021", - "abstract": "The paper introduces a novel, holistic approach for robust Screen-Camera Communication (SCC), where video content on a screen is visually encoded in a human-imperceptible fashion and decoded by a camera capturing images of such screen content. We first show that state-of-the-art SCC techniques have two key limitations for in-the-wild deployment: (a) the decoding accuracy drops rapidly under even modest screen extraction errors from the captured images, and (b) they generate perceptible flickers on common refresh rate screens even with minimal modulation of pixel intensity. To overcome these challenges, we introduce DeepLight, a system that incorporates machine learning (ML) models in the decoding pipeline to achieve humanly-imperceptible, moderately high SCC rates under diverse real-world conditions. DeepLight's key innovation is the design of a Deep Neural Network (DNN) based decoder that collectively decodes all the bits spatially encoded in a display frame, without attempting to precisely isolate the pixels associated with each encoded bit. In addition, DeepLight supports imperceptible encoding by selectively modulating the intensity of only the Blue channel, and provides reasonably accurate screen extraction (IoU values \u00e2\u0089\u00a5 83%) by using state-of-the-art object detection DNN pipelines. We show that a fully functional DeepLight system is able to robustly achieve high decoding accuracy (frame error rate < 0.2) and moderately-high data goodput (\u00e2\u0089\u00a50.95 Kbps) using a human-held smartphone camera, even over larger screen-camera distances (~ 2m).", + "abstract": "The paper introduces a novel, holistic approach for robust Screen-Camera Communication (SCC), where video content on a screen is visually encoded in a human-imperceptible fashion and decoded by a camera capturing images of such screen content. 
We first show that state-of-the-art SCC techniques have two key limitations for in-the-wild deployment: (a) the decoding accuracy drops rapidly under even modest screen extraction errors from the captured images, and (b) they generate perceptible flickers on common refresh rate screens even with minimal modulation of pixel intensity. To overcome these challenges, we introduce DeepLight, a system that incorporates machine learning (ML) models in the decoding pipeline to achieve humanly-imperceptible, moderately high SCC rates under diverse real-world conditions. DeepLight's key innovation is the design of a Deep Neural Network (DNN) based decoder that collectively decodes all the bits spatially encoded in a display frame, without attempting to precisely isolate the pixels associated with each encoded bit. In addition, DeepLight supports imperceptible encoding by selectively modulating the intensity of only the Blue channel, and provides reasonably accurate screen extraction (IoU values \u2265 83%) by using state-of-the-art object detection DNN pipelines. We show that a fully functional DeepLight system is able to robustly achieve high decoding accuracy (frame error rate < 0.2) and moderately-high data goodput (\u22650.95 Kbps) using a human-held smartphone camera, even over larger screen-camera distances (~ 2m).", "authors": [ "Vu Tran", "Gihan Jayatilaka", @@ -9029,7 +9029,7 @@ "title": "On implementing a client-server setting to prevent the Browser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH) attacks", "venue": "2016 Manufacturing & Industrial Engineering Symposium (MIES)", "year": "2016", - "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u00e2\u0080\u009cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u00e2\u0080\u009d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", + "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. 
This side channel has led to the \u201cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u201d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", "authors": [ "Isuru Sankalpa", "Tharindu Dhanushka", @@ -9598,7 +9598,7 @@ "title": "GPU accelerated adaptive banded event alignment for rapid comparative nanopore signal analysis", "venue": "BMC Bioinformatics", "year": "2020", - "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u00e2\u0088\u00bc3-5 \u00c3\u0097 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", + "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. 
Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u223c3-5 \u00d7 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", "authors": [ "Hasindu Gamaarachchi", "Chun Wai Lam", @@ -9934,7 +9934,7 @@ "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1109/ICIIS53135.2021.9660737/index.json" }, { - "title": "Detection of Novel Biomarker Genes of Alzheimer\u00e2\u0080\u0099s Disease Using Gene Expression Data", + "title": "Detection of Novel Biomarker Genes of Alzheimer\u2019s Disease Using Gene Expression Data", "venue": "2020 Moratuwa Engineering Research Conference (MERCon)", "year": "2020", "abstract": "It is well recognized, that most common form of dementia is Alzheimer's disease and a successful cure or medication is not discovered. A plethora of research has been conducted to understand the underlying mechanism and the pathogenesis of the Alzheimer's disease. To explore the underlying genetic structure of the disease, gene expression data is being used by many researches and computational and statistical approaches were used to identify possible genes that are risk. In this paper, we propose a machine learning framework that can be used to identify possible bio-marker genes. Our experiments discover possible set of 14 genes, which some of them are validated by biological sources. We also present a critical analysis of the propose machine learning framework using GSE5281 gene dataset.", @@ -10594,7 +10594,7 @@ "title": "Statechart based modeling and controller implementation of complex reactive systems", "venue": "2011 6th International Conference on Industrial and Information Systems (ICIIS)", "year": "2011", - "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. 
Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u00e2\u0080\u009cTwidosuite\u00e2\u0080\u009d for different operating conditions and finally tested on the elevator system.", + "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u201cTwidosuite\u201d for different operating conditions and finally tested on the elevator system.", "authors": [ "AC Vidanapathirana", "SD Dewasurendra", @@ -10712,7 +10712,7 @@ ], "Forestry": [ { - "title": "Detection of Novel Biomarker Genes of Alzheimer\u00e2\u0080\u0099s Disease Using Gene Expression Data", + "title": "Detection of Novel Biomarker Genes of Alzheimer\u2019s Disease Using Gene Expression Data", "venue": "2020 Moratuwa Engineering Research Conference (MERCon)", "year": "2020", "abstract": "It is well recognized, that most common form of dementia is Alzheimer's disease and a successful cure or medication is not discovered. A plethora of research has been conducted to understand the underlying mechanism and the pathogenesis of the Alzheimer's disease. To explore the underlying genetic structure of the disease, gene expression data is being used by many researches and computational and statistical approaches were used to identify possible genes that are risk. In this paper, we propose a machine learning framework that can be used to identify possible bio-marker genes. Our experiments discover possible set of 14 genes, which some of them are validated by biological sources. We also present a critical analysis of the propose machine learning framework using GSE5281 gene dataset.", @@ -10808,7 +10808,7 @@ "title": "GPU accelerated adaptive banded event alignment for rapid comparative nanopore signal analysis", "venue": "BMC Bioinformatics", "year": "2020", - "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. 
Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u00e2\u0088\u00bc3-5 \u00c3\u0097 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", + "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u223c3-5 \u00d7 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", "authors": [ "Hasindu Gamaarachchi", "Chun Wai Lam", @@ -10915,7 +10915,7 @@ "title": "GPU accelerated adaptive banded event alignment for rapid comparative nanopore signal analysis", "venue": "BMC Bioinformatics", "year": "2020", - "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. 
Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u00e2\u0088\u00bc3-5 \u00c3\u0097 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", + "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u223c3-5 \u00d7 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", "authors": [ "Hasindu Gamaarachchi", "Chun Wai Lam", @@ -11161,7 +11161,7 @@ "title": "Data Mining System for Predicting a Winning Cricket Team", "venue": "2021 IEEE 16th International Conference on Industrial and Information Systems (ICIIS)", "year": "2021", - "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u00e2\u0080\u0099 performances in a team and some features specific to the team and the match. 
The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", + "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u2019 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", "authors": [ "Dinithi Hasanika", "Roshani Dilhara", @@ -11345,7 +11345,7 @@ "title": "Statechart based modeling and controller implementation of complex reactive systems", "venue": "2011 6th International Conference on Industrial and Information Systems (ICIIS)", "year": "2011", - "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. 
The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u00e2\u0080\u009cTwidosuite\u00e2\u0080\u009d for different operating conditions and finally tested on the elevator system.", + "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u201cTwidosuite\u201d for different operating conditions and finally tested on the elevator system.", "authors": [ "AC Vidanapathirana", "SD Dewasurendra", @@ -11563,7 +11563,7 @@ "title": "Genopo: a nanopore sequencing analysis toolkit for portable Android devices", "venue": "Communications Biology", "year": "2020", - "abstract": "The advent of portable nanopore sequencing devices has enabled DNA and RNA sequencing to be performed in the field or the clinic. However, advances in in situ genomics require parallel development of portable, offline solutions for the computational analysis of sequencing data. Here we introduce Genopo, a mobile toolkit for nanopore sequencing analysis. Genopo compacts popular bioinformatics tools to an Android application, enabling fully portable computation. To demonstrate its utility for in situ genome analysis, we use Genopo to determine the complete genome sequence of the human coronavirus SARS-CoV-2 in nine patient isolates sequenced on a nanopore device, with Genopo executing this workflow in less than 30\u00e2\u0080\u0089min per sample on a range of popular smartphones. We further show how Genopo can be used to profile DNA methylation in a human genome sample, illustrating a flexible, efficient architecture that is suitable to run many popular bioinformatics tools and accommodate small or large genomes. As the first ever smartphone application for nanopore sequencing analysis, Genopo enables the genomics community to harness this cheap, ubiquitous computational resource.", + "abstract": "The advent of portable nanopore sequencing devices has enabled DNA and RNA sequencing to be performed in the field or the clinic. However, advances in in situ genomics require parallel development of portable, offline solutions for the computational analysis of sequencing data. Here we introduce Genopo, a mobile toolkit for nanopore sequencing analysis. 
Genopo compacts popular bioinformatics tools to an Android application, enabling fully portable computation. To demonstrate its utility for in situ genome analysis, we use Genopo to determine the complete genome sequence of the human coronavirus SARS-CoV-2 in nine patient isolates sequenced on a nanopore device, with Genopo executing this workflow in less than 30\u2009min per sample on a range of popular smartphones. We further show how Genopo can be used to profile DNA methylation in a human genome sample, illustrating a flexible, efficient architecture that is suitable to run many popular bioinformatics tools and accommodate small or large genomes. As the first ever smartphone application for nanopore sequencing analysis, Genopo enables the genomics community to harness this cheap, ubiquitous computational resource.", "authors": [ "Hiruna Samarakoon", "Sanoj Punchihewa", @@ -12021,7 +12021,7 @@ "title": "Accelerating k-nn classification algorithm using graphics processing units", "venue": "2016 IEEE International Conference on Information and Automation for Sustainability (ICIAfS)", "year": "2016", - "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00c3\u0097 faster execution time compared to a CPU version.", + "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00d7 faster execution time compared to a CPU version.", "authors": [ "S Selvaluxmiy", "TN Kumara", @@ -12264,7 +12264,7 @@ "title": "Statechart based modeling and controller implementation of complex reactive systems", "venue": "2011 6th International Conference on Industrial and Information Systems (ICIIS)", "year": "2011", - "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. 
An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u00e2\u0080\u009cTwidosuite\u00e2\u0080\u009d for different operating conditions and finally tested on the elevator system.", + "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u201cTwidosuite\u201d for different operating conditions and finally tested on the elevator system.", "authors": [ "AC Vidanapathirana", "SD Dewasurendra", @@ -12444,7 +12444,7 @@ "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1109/ICIInfS.2013.6732061/index.json" }, { - "title": "A structured hardware software architecture for peptide based diagnosis \u00e2\u0080\u0094 Sub-string matching problem with limited tolerance", + "title": "A structured hardware software architecture for peptide based diagnosis \u2014 Sub-string matching problem with limited tolerance", "venue": "2014 7th International Conference on Information and Automation for Sustainability", "year": "2014", "abstract": "The problem of inferring proteins from complex peptide samples in shotgun proteomic workflow sets extreme demands on computational resources in respect of the required very high processing throughputs, rapid processing rates and reliability of results. This is exacerbated by the fact that, in general, a given protein cannot be defined by a fixed sequence of amino acids due to the existence of splice variants and isoforms of that protein. Therefore, the problem of protein inference could be considered as one of identifying sequences of amino acids with some limited tolerance. 
Two problems arise from this: a) due to these (permitted) variations, the applicability of exact string matching methodologies could be questioned and b) the difficulty of defining a reference (peptide/amino acid) sequence for a particular set of proteins that are functionally indistinguishable, but with some variation in features. This paper presents a model-based hardware acceleration of a structured and practical inference approach that is developed and validated to solve the inference problem in a mass spectrometry experiment of realistic size. Our approach starts from an examination of the known set of splice variants and isoforms of a target protein to identify the Greatest Common Stable Substring (GCSS) of amino acids and the Substrings Subjects to Limited Variation (SSLV) and their respective locations on the GCSS. The hypothesis made here is that these latter substrings (SSLV) appear inside complete peptides and not cutting across peptide boundaries. Then we define and solve the Sub-string Matching Problem with Limited Tolerance (SMPLT) using the Bit-Split Aho Corasick Algorithm with Limited Tolerance (BSACLT) that we define and automate. This approach is validated on identified peptides in a labelled and clustered data set from UNIPROT. A model-based hardware software co-design strategy is used to accelerate the computational workflow of above described protein inference problem. Identification of Baylisascaris Procyonis infection was used as an application instance. This workflow can be generalised to any inexact multiple pattern matching application by replacing the patterns in a clustered and distributed environment which permits a distance between member strings to account for permitted deviations such as substitutions, insertions and deletions. The co-designed workflow achieved up to 70 times maximum speed-up compared to a similar workflow purely run on the processor used for co-design.", @@ -12813,7 +12813,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. 
Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00c3\u0097 higher in our method, which is still a mere 3.6 \u00c3\u0097 10 -5 % of the entire design space, and took 6.08 h.", + "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10 -5 % of the entire design space, and took 6.08 h.", "authors": [ "Isuru Nawinne", "Haris Javaid", @@ -12895,7 +12895,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. 
Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00c3\u0097 higher in our method, which is still a mere 3.6 \u00c3\u0097 10 -5 % of the entire design space, and took 6.08 h.", + "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10 -5 % of the entire design space, and took 6.08 h.", "authors": [ "Isuru Nawinne", "Haris Javaid", @@ -13617,12 +13617,12 @@ "title": "Random subspace and random projection nearest neighbor ensembles for high dimensional data", "venue": "Elsevier Expert systems with applications", "year": "2022", - "abstract": "The random subspace and the random projection methods are investigated and compared as techniques for forming ensembles of nearest neighbor classifiers in high dimensional feature spaces. The two methods have been empirically evaluated on three types of high-dimensional datasets: microarrays, chemoinformatics, and images. Experimental results on 34 datasets show that both the random subspace and the random projection method lead to improvements in predictive performance compared to using the standard nearest neighbor classifier, while the best method to use depends on the type of data considered; for the microarray and chemoinformatics datasets, random projection outperforms the random subspace method, while the opposite holds for the image datasets. An analysis using data complexity measures, such as attribute to instance ratio and Fisher\u00e2\u0080\u0099s discriminant ratio, provide some more detailed indications on what relative performance can be expected for specific datasets. 
The results also indicate that the resulting ensembles may be competitive with state-of-the-art ensemble classifiers; the nearest neighbor ensembles using random projection perform on par with random forests for the microarray and chemoinformatics datasets.", + "abstract": "The random subspace and the random projection methods are investigated and compared as techniques for forming ensembles of nearest neighbor classifiers in high dimensional feature spaces. The two methods have been empirically evaluated on three types of high-dimensional datasets: microarrays, chemoinformatics, and images. Experimental results on 34 datasets show that both the random subspace and the random projection method lead to improvements in predictive performance compared to using the standard nearest neighbor classifier, while the best method to use depends on the type of data considered; for the microarray and chemoinformatics datasets, random projection outperforms the random subspace method, while the opposite holds for the image datasets. An analysis using data complexity measures, such as attribute to instance ratio and Fisher\u2019s discriminant ratio, provide some more detailed indications on what relative performance can be expected for specific datasets. The results also indicate that the resulting ensembles may be competitive with state-of-the-art ensemble classifiers; the nearest neighbor ensembles using random projection perform on par with random forests for the microarray and chemoinformatics datasets.", "authors": [ "Sampath Deegalla", "Keerthi Walgama", "Panagiotis Papapetrou", - "Henrik Bostr\u00c3\u00b6m" + "Henrik Bostr\u00f6m" ], "author_info": [ { @@ -13653,7 +13653,7 @@ "profile_url": "#" }, { - "name": "Henrik Bostr\u00c3\u00b6m", + "name": "Henrik Bostr\u00f6m", "profile": "#", "type": "OUTSIDER", "id": "", @@ -13686,7 +13686,7 @@ "title": "DeepLight: Robust & Unobtrusive Real-time Screen-Camera Communication for Real-World Displays", "venue": "2021 20th ACM/IEEE International Conference on Information Processing in Sensor Networks (IPSN)", "year": "2021", - "abstract": "The paper introduces a novel, holistic approach for robust Screen-Camera Communication (SCC), where video content on a screen is visually encoded in a human-imperceptible fashion and decoded by a camera capturing images of such screen content. We first show that state-of-the-art SCC techniques have two key limitations for in-the-wild deployment: (a) the decoding accuracy drops rapidly under even modest screen extraction errors from the captured images, and (b) they generate perceptible flickers on common refresh rate screens even with minimal modulation of pixel intensity. To overcome these challenges, we introduce DeepLight, a system that incorporates machine learning (ML) models in the decoding pipeline to achieve humanly-imperceptible, moderately high SCC rates under diverse real-world conditions. DeepLight's key innovation is the design of a Deep Neural Network (DNN) based decoder that collectively decodes all the bits spatially encoded in a display frame, without attempting to precisely isolate the pixels associated with each encoded bit. In addition, DeepLight supports imperceptible encoding by selectively modulating the intensity of only the Blue channel, and provides reasonably accurate screen extraction (IoU values \u00e2\u0089\u00a5 83%) by using state-of-the-art object detection DNN pipelines. 
We show that a fully functional DeepLight system is able to robustly achieve high decoding accuracy (frame error rate < 0.2) and moderately-high data goodput (\u00e2\u0089\u00a50.95 Kbps) using a human-held smartphone camera, even over larger screen-camera distances (~ 2m).", + "abstract": "The paper introduces a novel, holistic approach for robust Screen-Camera Communication (SCC), where video content on a screen is visually encoded in a human-imperceptible fashion and decoded by a camera capturing images of such screen content. We first show that state-of-the-art SCC techniques have two key limitations for in-the-wild deployment: (a) the decoding accuracy drops rapidly under even modest screen extraction errors from the captured images, and (b) they generate perceptible flickers on common refresh rate screens even with minimal modulation of pixel intensity. To overcome these challenges, we introduce DeepLight, a system that incorporates machine learning (ML) models in the decoding pipeline to achieve humanly-imperceptible, moderately high SCC rates under diverse real-world conditions. DeepLight's key innovation is the design of a Deep Neural Network (DNN) based decoder that collectively decodes all the bits spatially encoded in a display frame, without attempting to precisely isolate the pixels associated with each encoded bit. In addition, DeepLight supports imperceptible encoding by selectively modulating the intensity of only the Blue channel, and provides reasonably accurate screen extraction (IoU values \u2265 83%) by using state-of-the-art object detection DNN pipelines. We show that a fully functional DeepLight system is able to robustly achieve high decoding accuracy (frame error rate < 0.2) and moderately-high data goodput (\u22650.95 Kbps) using a human-held smartphone camera, even over larger screen-camera distances (~ 2m).", "authors": [ "Vu Tran", "Gihan Jayatilaka", @@ -14883,7 +14883,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. 
The number of design points explored was 4\u00c3\u0097 higher in our method, which is still a mere 3.6 \u00c3\u0097 10 -5 % of the entire design space, and took 6.08 h.", + "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10 -5 % of the entire design space, and took 6.08 h.", "authors": [ "Isuru Nawinne", "Haris Javaid", @@ -14965,7 +14965,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. 
The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10^-5 % of the entire design space, and took 6.08 h.",
Furthermore, a model to forecast the demand of the pharmaceutical drugs is investigated using a real-world data set to demonstrate the functionality of BAT. Finally, the performance of BAT integrated with the platform is evaluated.", @@ -15732,7 +15732,7 @@ "researchgroups": [ "Smart and Intelligent Systems (IoT / AI / Wearable Computing)" ], - "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u00e2\u0080\u0093 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", + "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u2013 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", "tags": [ "IoT", "Smart Farming", @@ -16072,7 +16072,7 @@ "title": "Accelerating k-nn classification algorithm using graphics processing units", "venue": "2016 IEEE International Conference on Information and Automation for Sustainability (ICIAfS)", "year": "2016", - "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00c3\u0097 faster execution time compared to a CPU version.", + "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00d7 faster execution time compared to a CPU version.", "authors": [ "S Selvaluxmiy", "TN Kumara", @@ -16740,7 +16740,7 @@ "title": "New approach to practical leakage-resilient public-key cryptography", "venue": "Journal of Mathematical Cryptology", "year": "2020", - "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u00e2\u0080\u0093 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. 
We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", + "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u2013 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", "authors": [ "Suvradip Chakraborty", "Janaka Alawatugoda", @@ -16800,7 +16800,7 @@ "title": "Standard model leakage-resilient authenticated key exchange using inner-product extractors", "venue": "Designs, Codes and Cryptography", "year": "2022", - "abstract": "With the development of side-channel attacks, a necessity arises to invent authenticated key exchange protocols in a leakage-resilient manner. 
Constructing authenticated key exchange protocols using existing cryptographic schemes is an effective method, as such construction can be instantiated with any appropriate scheme in a way that the formal security argument remains valid. In parallel, constructing authenticated key exchange protocols that are proven to be secure in the standard model is more preferred as they rely on real-world assumptions. In this paper, we present a Diffie\u2013Hellman-style construction of a leakage-resilient authenticated key exchange protocol, that can be instantiated with any CCLA2-secure public-key encryption scheme and a function from the pseudo-random function family. Our protocol is proven to be secure in the standard model assuming the hardness of the decisional Diffie\u2013Hellman problem. Furthermore, it is resilient to continuous partial leakage of long-term secret keys, that happens even after the session key is established, while satisfying the security features defined by the eCK security model.",
The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", + "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u2019 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", "authors": [ "Dinithi Hasanika", "Roshani Dilhara", @@ -17491,7 +17491,7 @@ "title": "Accelerating k-nn classification algorithm using graphics processing units", "venue": "2016 IEEE International Conference on Information and Automation for Sustainability (ICIAfS)", "year": "2016", - "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00c3\u0097 faster execution time compared to a CPU version.", + "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. 
The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00d7 faster execution time compared to a CPU version.", "authors": [ "S Selvaluxmiy", "TN Kumara", @@ -17937,7 +17937,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00c3\u0097 higher in our method, which is still a mere 3.6 \u00c3\u0097 10 -5 % of the entire design space, and took 6.08 h.", + "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. 
The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10^-5 % of the entire design space, and took 6.08 h.",
The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10^-5 % of the entire design space, and took 6.08 h.",
The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", + "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u223c3-5 \u00d7 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", "authors": [ "Hasindu Gamaarachchi", "Chun Wai Lam", @@ -19031,7 +19031,7 @@ "title": "Support of Mobile Phones in a Private Network for Science Teaching", "venue": "International Journal of Interactive Mobile Technologies", "year": "2016", - "abstract": "The potential of mobile phones to facilitate students\u00e2\u0080\u0099 science learning, when they are engaging in group activities, was investigated. To minimize the disciplinary issues emerged from the previous research on mobile devices and to enhance the quality of learning, a set of mobile phones that are connected to a private network was used. The lesson planning and implementation through these mobile phones were facilitated by a web based Application. A purposively selected group of teachers developed three lessons while integrating mobile phones in a private network into learning activities. Then the lessons were implemented in real classroom settings. This paper is based on one of the lessons \u00e2\u0080\u0098Waves and their Characteristics\u00e2\u0080\u0099 that was implemented for Grade 11 students. The data were collected through observations using audio, video and field notes and were analyzed using thematic analysis technique with the help of NVivo10 qualitative data analysis software. Based on the thematic analysis, two assertions were derived. Notably teachers appreciated the support of the private network in enhancing the quality of group learning activity while minimizing the students\u00e2\u0080\u0099 misuse of mobile phones.", + "abstract": "The potential of mobile phones to facilitate students\u2019 science learning, when they are engaging in group activities, was investigated. To minimize the disciplinary issues emerged from the previous research on mobile devices and to enhance the quality of learning, a set of mobile phones that are connected to a private network was used. The lesson planning and implementation through these mobile phones were facilitated by a web based Application. 
A purposively selected group of teachers developed three lessons while integrating mobile phones in a private network into learning activities. Then the lessons were implemented in real classroom settings. This paper is based on one of the lessons \u2018Waves and their Characteristics\u2019 that was implemented for Grade 11 students. The data were collected through observations using audio, video and field notes and were analyzed using thematic analysis technique with the help of NVivo10 qualitative data analysis software. Based on the thematic analysis, two assertions were derived. Notably teachers appreciated the support of the private network in enhancing the quality of group learning activity while minimizing the students\u2019 misuse of mobile phones.", "authors": [ "Sakunthala Yatigammana Ekanayake", "Kamalanath Samarakoon" @@ -19310,7 +19310,7 @@ "title": "GPU accelerated adaptive banded event alignment for rapid comparative nanopore signal analysis", "venue": "BMC Bioinformatics", "year": "2020", - "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u00e2\u0088\u00bc3-5 \u00c3\u0097 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", + "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u223c3-5 \u00d7 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. 
We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", "authors": [ "Hasindu Gamaarachchi", "Chun Wai Lam", @@ -19417,7 +19417,7 @@ "title": "GPU accelerated adaptive banded event alignment for rapid comparative nanopore signal analysis", "venue": "BMC Bioinformatics", "year": "2020", - "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u00e2\u0088\u00bc3-5 \u00c3\u0097 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", + "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u223c3-5 \u00d7 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). 
The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", "authors": [ "Hasindu Gamaarachchi", "Chun Wai Lam", @@ -19524,7 +19524,7 @@ "title": "Genopo: a nanopore sequencing analysis toolkit for portable Android devices", "venue": "Communications Biology", "year": "2020", - "abstract": "The advent of portable nanopore sequencing devices has enabled DNA and RNA sequencing to be performed in the field or the clinic. However, advances in in situ genomics require parallel development of portable, offline solutions for the computational analysis of sequencing data. Here we introduce Genopo, a mobile toolkit for nanopore sequencing analysis. Genopo compacts popular bioinformatics tools to an Android application, enabling fully portable computation. To demonstrate its utility for in situ genome analysis, we use Genopo to determine the complete genome sequence of the human coronavirus SARS-CoV-2 in nine patient isolates sequenced on a nanopore device, with Genopo executing this workflow in less than 30\u00e2\u0080\u0089min per sample on a range of popular smartphones. We further show how Genopo can be used to profile DNA methylation in a human genome sample, illustrating a flexible, efficient architecture that is suitable to run many popular bioinformatics tools and accommodate small or large genomes. As the first ever smartphone application for nanopore sequencing analysis, Genopo enables the genomics community to harness this cheap, ubiquitous computational resource.", + "abstract": "The advent of portable nanopore sequencing devices has enabled DNA and RNA sequencing to be performed in the field or the clinic. However, advances in in situ genomics require parallel development of portable, offline solutions for the computational analysis of sequencing data. Here we introduce Genopo, a mobile toolkit for nanopore sequencing analysis. Genopo compacts popular bioinformatics tools to an Android application, enabling fully portable computation. To demonstrate its utility for in situ genome analysis, we use Genopo to determine the complete genome sequence of the human coronavirus SARS-CoV-2 in nine patient isolates sequenced on a nanopore device, with Genopo executing this workflow in less than 30\u2009min per sample on a range of popular smartphones. We further show how Genopo can be used to profile DNA methylation in a human genome sample, illustrating a flexible, efficient architecture that is suitable to run many popular bioinformatics tools and accommodate small or large genomes. As the first ever smartphone application for nanopore sequencing analysis, Genopo enables the genomics community to harness this cheap, ubiquitous computational resource.", "authors": [ "Hiruna Samarakoon", "Sanoj Punchihewa", @@ -19725,12 +19725,12 @@ "title": "Random subspace and random projection nearest neighbor ensembles for high dimensional data", "venue": "Elsevier Expert systems with applications", "year": "2022", - "abstract": "The random subspace and the random projection methods are investigated and compared as techniques for forming ensembles of nearest neighbor classifiers in high dimensional feature spaces. The two methods have been empirically evaluated on three types of high-dimensional datasets: microarrays, chemoinformatics, and images. 
Experimental results on 34 datasets show that both the random subspace and the random projection method lead to improvements in predictive performance compared to using the standard nearest neighbor classifier, while the best method to use depends on the type of data considered; for the microarray and chemoinformatics datasets, random projection outperforms the random subspace method, while the opposite holds for the image datasets. An analysis using data complexity measures, such as attribute to instance ratio and Fisher\u2019s discriminant ratio, provide some more detailed indications on what relative performance can be expected for specific datasets. The results also indicate that the resulting ensembles may be competitive with state-of-the-art ensemble classifiers; the nearest neighbor ensembles using random projection perform on par with random forests for the microarray and chemoinformatics datasets.",
[RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u223c3-5 \u00d7 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.",
We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10^-5 % of the entire design space, and took 6.08 h.",
We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10^-5 % of the entire design space, and took 6.08 h.",
Two problems arise from this: a) due to these (permitted) variations, the applicability of exact string matching methodologies could be questioned and b) the difficulty of defining a reference (peptide/amino acid) sequence for a particular set of proteins that are functionally indistinguishable, but with some variation in features. This paper presents a model-based hardware acceleration of a structured and practical inference approach that is developed and validated to solve the inference problem in a mass spectrometry experiment of realistic size. Our approach starts from an examination of the known set of splice variants and isoforms of a target protein to identify the Greatest Common Stable Substring (GCSS) of amino acids and the Substrings Subjects to Limited Variation (SSLV) and their respective locations on the GCSS. The hypothesis made here is that these latter substrings (SSLV) appear inside complete peptides and not cutting across peptide boundaries. Then we define and solve the Sub-string Matching Problem with Limited Tolerance (SMPLT) using the Bit-Split Aho Corasick Algorithm with Limited Tolerance (BSACLT) that we define and automate. This approach is validated on identified peptides in a labelled and clustered data set from UNIPROT. A model-based hardware software co-design strategy is used to accelerate the computational workflow of above described protein inference problem. Identification of Baylisascaris Procyonis infection was used as an application instance. This workflow can be generalised to any inexact multiple pattern matching application by replacing the patterns in a clustered and distributed environment which permits a distance between member strings to account for permitted deviations such as substitutions, insertions and deletions. The co-designed workflow achieved up to 70 times maximum speed-up compared to a similar workflow purely run on the processor used for co-design.", @@ -22680,7 +22680,7 @@ "title": "Accelerating k-nn classification algorithm using graphics processing units", "venue": "2016 IEEE International Conference on Information and Automation for Sustainability (ICIAfS)", "year": "2016", - "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00c3\u0097 faster execution time compared to a CPU version.", + "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. 
The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00d7 faster execution time compared to a CPU version.", "authors": [ "S Selvaluxmiy", "TN Kumara", @@ -22769,7 +22769,7 @@ ], "Principal component analysis": [ { - "title": "Detection of Novel Biomarker Genes of Alzheimer\u00e2\u0080\u0099s Disease Using Gene Expression Data", + "title": "Detection of Novel Biomarker Genes of Alzheimer\u2019s Disease Using Gene Expression Data", "venue": "2020 Moratuwa Engineering Research Conference (MERCon)", "year": "2020", "abstract": "It is well recognized, that most common form of dementia is Alzheimer's disease and a successful cure or medication is not discovered. A plethora of research has been conducted to understand the underlying mechanism and the pathogenesis of the Alzheimer's disease. To explore the underlying genetic structure of the disease, gene expression data is being used by many researches and computational and statistical approaches were used to identify possible genes that are risk. In this paper, we propose a machine learning framework that can be used to identify possible bio-marker genes. Our experiments discover possible set of 14 genes, which some of them are validated by biological sources. We also present a critical analysis of the propose machine learning framework using GSE5281 gene dataset.", @@ -22865,7 +22865,7 @@ "title": "Statechart based modeling and controller implementation of complex reactive systems", "venue": "2011 6th International Conference on Industrial and Information Systems (ICIIS)", "year": "2011", - "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u00e2\u0080\u009cTwidosuite\u00e2\u0080\u009d for different operating conditions and finally tested on the elevator system.", + "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. 
Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u201cTwidosuite\u201d for different operating conditions and finally tested on the elevator system.", "authors": [ "AC Vidanapathirana", "SD Dewasurendra", @@ -23125,7 +23125,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00c3\u0097 higher in our method, which is still a mere 3.6 \u00c3\u0097 10 -5 % of the entire design space, and took 6.08 h.", + "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. 
Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10 -5 % of the entire design space, and took 6.08 h.", "authors": [ "Isuru Nawinne", "Haris Javaid", @@ -23207,7 +23207,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00c3\u0097 higher in our method, which is still a mere 3.6 \u00c3\u0097 10 -5 % of the entire design space, and took 6.08 h.", + "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. 
Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10 -5 % of the entire design space, and took 6.08 h.", "authors": [ "Isuru Nawinne", "Haris Javaid", @@ -23299,7 +23299,7 @@ "Swarnalatha Radhakrishnan", "Roshan Ragel", "Jude Angelo Ambrose", - "J\u00c3\u00b6rg Henkel", + "J\u00f6rg Henkel", "Sri Parameswaran" ], "author_info": [ @@ -23358,7 +23358,7 @@ "profile_url": "#" }, { - "name": "J\u00c3\u00b6rg Henkel", + "name": "J\u00f6rg Henkel", "profile": "#", "type": "OUTSIDER", "id": "", @@ -23744,7 +23744,7 @@ "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1109/ICIInfS.2013.6732061/index.json" }, { - "title": "A structured hardware software architecture for peptide based diagnosis \u00e2\u0080\u0094 Sub-string matching problem with limited tolerance", + "title": "A structured hardware software architecture for peptide based diagnosis \u2014 Sub-string matching problem with limited tolerance", "venue": "2014 7th International Conference on Information and Automation for Sustainability", "year": "2014", "abstract": "The problem of inferring proteins from complex peptide samples in shotgun proteomic workflow sets extreme demands on computational resources in respect of the required very high processing throughputs, rapid processing rates and reliability of results. This is exacerbated by the fact that, in general, a given protein cannot be defined by a fixed sequence of amino acids due to the existence of splice variants and isoforms of that protein. Therefore, the problem of protein inference could be considered as one of identifying sequences of amino acids with some limited tolerance. Two problems arise from this: a) due to these (permitted) variations, the applicability of exact string matching methodologies could be questioned and b) the difficulty of defining a reference (peptide/amino acid) sequence for a particular set of proteins that are functionally indistinguishable, but with some variation in features. This paper presents a model-based hardware acceleration of a structured and practical inference approach that is developed and validated to solve the inference problem in a mass spectrometry experiment of realistic size. 
Our approach starts from an examination of the known set of splice variants and isoforms of a target protein to identify the Greatest Common Stable Substring (GCSS) of amino acids and the Substrings Subjects to Limited Variation (SSLV) and their respective locations on the GCSS. The hypothesis made here is that these latter substrings (SSLV) appear inside complete peptides and not cutting across peptide boundaries. Then we define and solve the Sub-string Matching Problem with Limited Tolerance (SMPLT) using the Bit-Split Aho Corasick Algorithm with Limited Tolerance (BSACLT) that we define and automate. This approach is validated on identified peptides in a labelled and clustered data set from UNIPROT. A model-based hardware software co-design strategy is used to accelerate the computational workflow of above described protein inference problem. Identification of Baylisascaris Procyonis infection was used as an application instance. This workflow can be generalised to any inexact multiple pattern matching application by replacing the patterns in a clustered and distributed environment which permits a distance between member strings to account for permitted deviations such as substitutions, insertions and deletions. The co-designed workflow achieved up to 70 times maximum speed-up compared to a similar workflow purely run on the processor used for co-design.", @@ -24586,12 +24586,12 @@ "title": "Random subspace and random projection nearest neighbor ensembles for high dimensional data", "venue": "Elsevier Expert systems with applications", "year": "2022", - "abstract": "The random subspace and the random projection methods are investigated and compared as techniques for forming ensembles of nearest neighbor classifiers in high dimensional feature spaces. The two methods have been empirically evaluated on three types of high-dimensional datasets: microarrays, chemoinformatics, and images. Experimental results on 34 datasets show that both the random subspace and the random projection method lead to improvements in predictive performance compared to using the standard nearest neighbor classifier, while the best method to use depends on the type of data considered; for the microarray and chemoinformatics datasets, random projection outperforms the random subspace method, while the opposite holds for the image datasets. An analysis using data complexity measures, such as attribute to instance ratio and Fisher\u00e2\u0080\u0099s discriminant ratio, provide some more detailed indications on what relative performance can be expected for specific datasets. The results also indicate that the resulting ensembles may be competitive with state-of-the-art ensemble classifiers; the nearest neighbor ensembles using random projection perform on par with random forests for the microarray and chemoinformatics datasets.", + "abstract": "The random subspace and the random projection methods are investigated and compared as techniques for forming ensembles of nearest neighbor classifiers in high dimensional feature spaces. The two methods have been empirically evaluated on three types of high-dimensional datasets: microarrays, chemoinformatics, and images. 
Experimental results on 34 datasets show that both the random subspace and the random projection method lead to improvements in predictive performance compared to using the standard nearest neighbor classifier, while the best method to use depends on the type of data considered; for the microarray and chemoinformatics datasets, random projection outperforms the random subspace method, while the opposite holds for the image datasets. An analysis using data complexity measures, such as attribute to instance ratio and Fisher\u2019s discriminant ratio, provide some more detailed indications on what relative performance can be expected for specific datasets. The results also indicate that the resulting ensembles may be competitive with state-of-the-art ensemble classifiers; the nearest neighbor ensembles using random projection perform on par with random forests for the microarray and chemoinformatics datasets.", "authors": [ "Sampath Deegalla", "Keerthi Walgama", "Panagiotis Papapetrou", - "Henrik Bostr\u00c3\u00b6m" + "Henrik Bostr\u00f6m" ], "author_info": [ { @@ -24622,7 +24622,7 @@ "profile_url": "#" }, { - "name": "Henrik Bostr\u00c3\u00b6m", + "name": "Henrik Bostr\u00f6m", "profile": "#", "type": "OUTSIDER", "id": "", @@ -24655,12 +24655,12 @@ "title": "Random subspace and random projection nearest neighbor ensembles for high dimensional data", "venue": "Elsevier Expert systems with applications", "year": "2022", - "abstract": "The random subspace and the random projection methods are investigated and compared as techniques for forming ensembles of nearest neighbor classifiers in high dimensional feature spaces. The two methods have been empirically evaluated on three types of high-dimensional datasets: microarrays, chemoinformatics, and images. Experimental results on 34 datasets show that both the random subspace and the random projection method lead to improvements in predictive performance compared to using the standard nearest neighbor classifier, while the best method to use depends on the type of data considered; for the microarray and chemoinformatics datasets, random projection outperforms the random subspace method, while the opposite holds for the image datasets. An analysis using data complexity measures, such as attribute to instance ratio and Fisher\u00e2\u0080\u0099s discriminant ratio, provide some more detailed indications on what relative performance can be expected for specific datasets. The results also indicate that the resulting ensembles may be competitive with state-of-the-art ensemble classifiers; the nearest neighbor ensembles using random projection perform on par with random forests for the microarray and chemoinformatics datasets.", + "abstract": "The random subspace and the random projection methods are investigated and compared as techniques for forming ensembles of nearest neighbor classifiers in high dimensional feature spaces. The two methods have been empirically evaluated on three types of high-dimensional datasets: microarrays, chemoinformatics, and images. Experimental results on 34 datasets show that both the random subspace and the random projection method lead to improvements in predictive performance compared to using the standard nearest neighbor classifier, while the best method to use depends on the type of data considered; for the microarray and chemoinformatics datasets, random projection outperforms the random subspace method, while the opposite holds for the image datasets. 
An analysis using data complexity measures, such as attribute to instance ratio and Fisher\u2019s discriminant ratio, provide some more detailed indications on what relative performance can be expected for specific datasets. The results also indicate that the resulting ensembles may be competitive with state-of-the-art ensemble classifiers; the nearest neighbor ensembles using random projection perform on par with random forests for the microarray and chemoinformatics datasets.", "authors": [ "Sampath Deegalla", "Keerthi Walgama", "Panagiotis Papapetrou", - "Henrik Bostr\u00c3\u00b6m" + "Henrik Bostr\u00f6m" ], "author_info": [ { @@ -24691,7 +24691,7 @@ "profile_url": "#" }, { - "name": "Henrik Bostr\u00c3\u00b6m", + "name": "Henrik Bostr\u00f6m", "profile": "#", "type": "OUTSIDER", "id": "", @@ -24946,7 +24946,7 @@ "Swarnalatha Radhakrishnan", "Roshan Ragel", "Jude Angelo Ambrose", - "J\u00c3\u00b6rg Henkel", + "J\u00f6rg Henkel", "Sri Parameswaran" ], "author_info": [ @@ -25005,7 +25005,7 @@ "profile_url": "#" }, { - "name": "J\u00c3\u00b6rg Henkel", + "name": "J\u00f6rg Henkel", "profile": "#", "type": "OUTSIDER", "id": "", @@ -26011,7 +26011,7 @@ "Swarnalatha Radhakrishnan", "Roshan Ragel", "Jude Angelo Ambrose", - "J\u00c3\u00b6rg Henkel", + "J\u00f6rg Henkel", "Sri Parameswaran" ], "author_info": [ @@ -26070,7 +26070,7 @@ "profile_url": "#" }, { - "name": "J\u00c3\u00b6rg Henkel", + "name": "J\u00f6rg Henkel", "profile": "#", "type": "OUTSIDER", "id": "", @@ -26339,7 +26339,7 @@ "title": "Support of Mobile Phones in a Private Network for Science Teaching", "venue": "International Journal of Interactive Mobile Technologies", "year": "2016", - "abstract": "The potential of mobile phones to facilitate students\u00e2\u0080\u0099 science learning, when they are engaging in group activities, was investigated. To minimize the disciplinary issues emerged from the previous research on mobile devices and to enhance the quality of learning, a set of mobile phones that are connected to a private network was used. The lesson planning and implementation through these mobile phones were facilitated by a web based Application. A purposively selected group of teachers developed three lessons while integrating mobile phones in a private network into learning activities. Then the lessons were implemented in real classroom settings. This paper is based on one of the lessons \u00e2\u0080\u0098Waves and their Characteristics\u00e2\u0080\u0099 that was implemented for Grade 11 students. The data were collected through observations using audio, video and field notes and were analyzed using thematic analysis technique with the help of NVivo10 qualitative data analysis software. Based on the thematic analysis, two assertions were derived. Notably teachers appreciated the support of the private network in enhancing the quality of group learning activity while minimizing the students\u00e2\u0080\u0099 misuse of mobile phones.", + "abstract": "The potential of mobile phones to facilitate students\u2019 science learning, when they are engaging in group activities, was investigated. To minimize the disciplinary issues emerged from the previous research on mobile devices and to enhance the quality of learning, a set of mobile phones that are connected to a private network was used. The lesson planning and implementation through these mobile phones were facilitated by a web based Application. 
A purposively selected group of teachers developed three lessons while integrating mobile phones in a private network into learning activities. Then the lessons were implemented in real classroom settings. This paper is based on one of the lessons \u2018Waves and their Characteristics\u2019 that was implemented for Grade 11 students. The data were collected through observations using audio, video and field notes and were analyzed using thematic analysis technique with the help of NVivo10 qualitative data analysis software. Based on the thematic analysis, two assertions were derived. Notably teachers appreciated the support of the private network in enhancing the quality of group learning activity while minimizing the students\u2019 misuse of mobile phones.", "authors": [ "Sakunthala Yatigammana Ekanayake", "Kamalanath Samarakoon" @@ -27297,7 +27297,7 @@ "title": "On implementing a client-server setting to prevent the Browser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH) attacks", "venue": "2016 Manufacturing & Industrial Engineering Symposium (MIES)", "year": "2016", - "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u00e2\u0080\u009cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u00e2\u0080\u009d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", + "abstract": "Compression is desirable for network applications as it saves bandwidth. Differently, when data is compressed before being encrypted, the amount of compression leaks information about the amount of redundancy in the plaintext. This side channel has led to the \u201cBrowser Reconnaissance and Exfiltration via Adaptive Compression of Hypertext (BREACH)\u201d attack on web traffic protected by the TLS protocol. The general guidance to prevent this attack is to disable HTTP compression, preserving confidentiality but sacrificing bandwidth. As a more sophisticated countermeasure, fixed-dictionary compression was introduced in 2015 enabling compression while protecting high-value secrets, such as cookies, from attacks. 
The fixed-dictionary compression method is a cryptographically sound countermeasure against the BREACH attack, since it is proven secure in a suitable security model. In this project, we integrate the fixed-dictionary compression method as a countermeasure for BREACH attack, for real-world client-server setting. Further, we measure the performance of the fixed-dictionary compression algorithm against the DEFLATE compression algorithm. The results evident that, it is possible to save some amount of bandwidth, with reasonable compression/decompression time compared to DEFLATE operations. The countermeasure is easy to implement and deploy, hence, this would be a possible direction to mitigate the BREACH attack efficiently, rather than stripping off the HTTP compression entirely.", "authors": [ "Isuru Sankalpa", "Tharindu Dhanushka", @@ -27684,7 +27684,7 @@ "title": "GPU accelerated adaptive banded event alignment for rapid comparative nanopore signal analysis", "venue": "BMC Bioinformatics", "year": "2020", - "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u00e2\u0088\u00bc3-5 \u00c3\u0097 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", + "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u223c3-5 \u00d7 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. 
We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", "authors": [ "Hasindu Gamaarachchi", "Chun Wai Lam", @@ -28295,7 +28295,7 @@ "researchgroups": [ "Smart and Intelligent Systems (IoT / AI / Wearable Computing)" ], - "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u00e2\u0080\u0093 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", + "funding": "The Early Career Fellowship of the Organization for Women in Science for Developing World (OWSD, Early Career Fellowship https://owsd.net/) funded this project \u2013 the award agreement 4500406736 was awarded to Pradeepa C.G. Bandaranayake", "tags": [ "IoT", "Smart Farming", @@ -28311,7 +28311,7 @@ "title": "GPU accelerated adaptive banded event alignment for rapid comparative nanopore signal analysis", "venue": "BMC Bioinformatics", "year": "2020", - "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u00e2\u0088\u00bc3-5 \u00c3\u0097 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", + "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. 
[RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u223c3-5 \u00d7 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", "authors": [ "Hasindu Gamaarachchi", "Chun Wai Lam", @@ -28488,7 +28488,7 @@ "title": "Statechart based modeling and controller implementation of complex reactive systems", "venue": "2011 6th International Conference on Industrial and Information Systems (ICIIS)", "year": "2011", - "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u00e2\u0080\u009cTwidosuite\u00e2\u0080\u009d for different operating conditions and finally tested on the elevator system.", + "abstract": "Statechart formalism has been a preferred choice for modeling complex reactive systems (CRS) in recent years. It has inbuilt powerful features of orthogonality, hierarchy, intermodular communication and history. Once statechart based system modeling is done the next issues to be addressed are (1) modular verification of the system for failsafe operation under all possible working conditions (2) progressive controller implementation together with the supervisory control while maintaining traceability and re-configurability and (3) facilitation of controller adaptation for progressive incorporation of security features and supervisory specifications. An elevator system was designed and built to reflect exigencies of a typical CRS hardware/software platform. A controller was designed to meet the above requirements and tested on the platform to validate the feasibility of model-based control design/verification methodology for real scale systems. 
Modularity was achieved by developing the statechart model of the plant into a tree of communicating language generators. Progresively verified modules were then translated into sequential function charts (SFC) which were finally integrated to form a complete flat SFC. The SFC was then implemented on a PLC platform (Telemechanique). The program was first validated in simulation using Telemechanique \u201cTwidosuite\u201d for different operating conditions and finally tested on the elevator system.", "authors": [ "AC Vidanapathirana", "SD Dewasurendra", @@ -28607,7 +28607,7 @@ "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1109/ICIInfS.2013.6732061/index.json" }, { - "title": "A structured hardware software architecture for peptide based diagnosis \u00e2\u0080\u0094 Sub-string matching problem with limited tolerance", + "title": "A structured hardware software architecture for peptide based diagnosis \u2014 Sub-string matching problem with limited tolerance", "venue": "2014 7th International Conference on Information and Automation for Sustainability", "year": "2014", "abstract": "The problem of inferring proteins from complex peptide samples in shotgun proteomic workflow sets extreme demands on computational resources in respect of the required very high processing throughputs, rapid processing rates and reliability of results. This is exacerbated by the fact that, in general, a given protein cannot be defined by a fixed sequence of amino acids due to the existence of splice variants and isoforms of that protein. Therefore, the problem of protein inference could be considered as one of identifying sequences of amino acids with some limited tolerance. Two problems arise from this: a) due to these (permitted) variations, the applicability of exact string matching methodologies could be questioned and b) the difficulty of defining a reference (peptide/amino acid) sequence for a particular set of proteins that are functionally indistinguishable, but with some variation in features. This paper presents a model-based hardware acceleration of a structured and practical inference approach that is developed and validated to solve the inference problem in a mass spectrometry experiment of realistic size. Our approach starts from an examination of the known set of splice variants and isoforms of a target protein to identify the Greatest Common Stable Substring (GCSS) of amino acids and the Substrings Subjects to Limited Variation (SSLV) and their respective locations on the GCSS. The hypothesis made here is that these latter substrings (SSLV) appear inside complete peptides and not cutting across peptide boundaries. Then we define and solve the Sub-string Matching Problem with Limited Tolerance (SMPLT) using the Bit-Split Aho Corasick Algorithm with Limited Tolerance (BSACLT) that we define and automate. This approach is validated on identified peptides in a labelled and clustered data set from UNIPROT. A model-based hardware software co-design strategy is used to accelerate the computational workflow of above described protein inference problem. Identification of Baylisascaris Procyonis infection was used as an application instance. This workflow can be generalised to any inexact multiple pattern matching application by replacing the patterns in a clustered and distributed environment which permits a distance between member strings to account for permitted deviations such as substitutions, insertions and deletions. 
The co-designed workflow achieved up to 70 times maximum speed-up compared to a similar workflow purely run on the processor used for co-design.", @@ -29388,7 +29388,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00c3\u0097 higher in our method, which is still a mere 3.6 \u00c3\u0097 10 -5 % of the entire design space, and took 6.08 h.", + "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. 
The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10 -5 % of the entire design space, and took 6.08 h.", "authors": [ "Isuru Nawinne", "Haris Javaid", @@ -29470,7 +29470,7 @@ "title": "Exploring multilevel cache hierarchies in application specific mpsocs", "venue": "IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems", "year": "2015", - "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. The number of design points explored was 4\u00c3\u0097 higher in our method, which is still a mere 3.6 \u00c3\u0097 10 -5 % of the entire design space, and took 6.08 h.", + "abstract": "Multiprocessor systems make use of multilevel cache hierarchies to improve overall memory access speed. Embedded systems typically use configurable processors, where the caches in the system can be customized for a given application or a set of applications. Finding the optimal or a near-optimal set size, block size, and associativity of each of the caches in a multilevel cache hierarchy is a challenging task due to the presence of billions or even trillions of design points. This paper presents an iterative exploration method to find suitable configurations for all the caches in the hierarchy of an application specific multiprocessor system-on-chip, to improve memory access speed. We propose an algorithm and combine it with the use of specialized hardware for parallel cache simulation to enable multiple back-and-forth iterations through the cache levels. In every iteration, our algorithm explores selected portions of the entire design space to quickly converge upon the final design point. We demonstrate our methodology on two- and three-level cache hierarchies with private and shared caches in a quad-core system, respectively, consisting of 5.4 billion and 10.4 trillion design points. Our method was able to find design points with up to 18.9% lower average memory access time while reducing total cache size by up to 74.15%, compared to a state-of-the-art noniterative method. 
The number of design points explored was 4\u00d7 higher in our method, which is still a mere 3.6 \u00d7 10 -5 % of the entire design space, and took 6.08 h.", "authors": [ "Isuru Nawinne", "Haris Javaid", @@ -29675,7 +29675,7 @@ "title": "Standard model leakage-resilient authenticated key exchange using inner-product extractors", "venue": "Designs, Codes and Cryptography", "year": "2022", - "abstract": "With the development of side-channel attacks, a necessity arises to invent authenticated key exchange protocols in a leakage-resilient manner. Constructing authenticated key exchange protocols using existing cryptographic schemes is an effective method, as such construction can be instantiated with any appropriate scheme in a way that the formal security argument remains valid. In parallel, constructing authenticated key exchange protocols that are proven to be secure in the standard model is more preferred as they rely on real-world assumptions. In this paper, we present a Diffie\u00e2\u0080\u0093Hellman-style construction of a leakage-resilient authenticated key exchange protocol, that can be instantiated with any CCLA2-secure public-key encryption scheme and a function from the pseudo-random function family. Our protocol is proven to be secure in the standard model assuming the hardness of the decisional Diffie\u00e2\u0080\u0093Hellman problem. Furthermore, it is resilient to continuous partial leakage of long-term secret keys, that happens even after the session key is established, while satisfying the security features defined by the eCK security model.", + "abstract": "With the development of side-channel attacks, a necessity arises to invent authenticated key exchange protocols in a leakage-resilient manner. Constructing authenticated key exchange protocols using existing cryptographic schemes is an effective method, as such construction can be instantiated with any appropriate scheme in a way that the formal security argument remains valid. In parallel, constructing authenticated key exchange protocols that are proven to be secure in the standard model is more preferred as they rely on real-world assumptions. In this paper, we present a Diffie\u2013Hellman-style construction of a leakage-resilient authenticated key exchange protocol, that can be instantiated with any CCLA2-secure public-key encryption scheme and a function from the pseudo-random function family. Our protocol is proven to be secure in the standard model assuming the hardness of the decisional Diffie\u2013Hellman problem. Furthermore, it is resilient to continuous partial leakage of long-term secret keys, that happens even after the session key is established, while satisfying the security features defined by the eCK security model.", "authors": [ "Janaka Alawatugoda", "Tatsuaki Okamoto" @@ -29948,7 +29948,7 @@ "title": "Data Mining System for Predicting a Winning Cricket Team", "venue": "2021 IEEE 16th International Conference on Industrial and Information Systems (ICIIS)", "year": "2021", - "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. 
The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u00e2\u0080\u0099 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", + "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u2019 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", "authors": [ "Dinithi Hasanika", "Roshani Dilhara", @@ -30323,7 +30323,7 @@ "edit_url": "https://github.com/cepdnaclk/api.ce.pdn.ac.lk/blob/main/publications/v1/10.1109/ICIINFS.2017.8300399/index.json" }, { - "title": "Detection of Novel Biomarker Genes of Alzheimer\u00e2\u0080\u0099s Disease Using Gene Expression Data", + "title": "Detection of Novel Biomarker Genes of Alzheimer\u2019s Disease Using Gene Expression Data", "venue": "2020 Moratuwa Engineering Research Conference (MERCon)", "year": "2020", "abstract": "It is well recognized, that most common form of dementia is Alzheimer's disease and a successful cure or medication is not discovered. A plethora of research has been conducted to understand the underlying mechanism and the pathogenesis of the Alzheimer's disease. To explore the underlying genetic structure of the disease, gene expression data is being used by many researches and computational and statistical approaches were used to identify possible genes that are risk. 
In this paper, we propose a machine learning framework that can be used to identify possible bio-marker genes. Our experiments discover possible set of 14 genes, which some of them are validated by biological sources. We also present a critical analysis of the propose machine learning framework using GSE5281 gene dataset.", @@ -31076,7 +31076,7 @@ "title": "Accelerating k-nn classification algorithm using graphics processing units", "venue": "2016 IEEE International Conference on Information and Automation for Sustainability (ICIAfS)", "year": "2016", - "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00c3\u0097 faster execution time compared to a CPU version.", + "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00d7 faster execution time compared to a CPU version.", "authors": [ "S Selvaluxmiy", "TN Kumara", @@ -31589,7 +31589,7 @@ "Swarnalatha Radhakrishnan", "Roshan Ragel", "Jude Angelo Ambrose", - "J\u00c3\u00b6rg Henkel", + "J\u00f6rg Henkel", "Sri Parameswaran" ], "author_info": [ @@ -31648,7 +31648,7 @@ "profile_url": "#" }, { - "name": "J\u00c3\u00b6rg Henkel", + "name": "J\u00f6rg Henkel", "profile": "#", "type": "OUTSIDER", "id": "", @@ -32011,7 +32011,7 @@ "title": "Accelerating k-nn classification algorithm using graphics processing units", "venue": "2016 IEEE International Conference on Information and Automation for Sustainability (ICIAfS)", "year": "2016", - "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00c3\u0097 faster execution time compared to a CPU version.", + "abstract": "k-Nearest Neighbor (k-NN) is a well-known classification algorithm used in many machine learning applications. When the input data size of k-NN algorithm increases, the execution time of the algorithm increases significantly. Therefore, this becomes a bottleneck for practical usage. 
Since k-NN algorithm is using searching, sorting and other parallelly executable tasks, we have implemented the k-NN algorithm on a GPU using CUDA utilizing the parallel tasks. The results were promising as expected and for 43,500 training records and 14,500 testing records with nine attributes, the GPU execution showed about a 100\u00d7 faster execution time compared to a CPU version.", "authors": [ "S Selvaluxmiy", "TN Kumara", @@ -32318,7 +32318,7 @@ "title": "DeepLight: Robust & Unobtrusive Real-time Screen-Camera Communication for Real-World Displays", "venue": "2021 20th ACM/IEEE International Conference on Information Processing in Sensor Networks (IPSN)", "year": "2021", - "abstract": "The paper introduces a novel, holistic approach for robust Screen-Camera Communication (SCC), where video content on a screen is visually encoded in a human-imperceptible fashion and decoded by a camera capturing images of such screen content. We first show that state-of-the-art SCC techniques have two key limitations for in-the-wild deployment: (a) the decoding accuracy drops rapidly under even modest screen extraction errors from the captured images, and (b) they generate perceptible flickers on common refresh rate screens even with minimal modulation of pixel intensity. To overcome these challenges, we introduce DeepLight, a system that incorporates machine learning (ML) models in the decoding pipeline to achieve humanly-imperceptible, moderately high SCC rates under diverse real-world conditions. DeepLight's key innovation is the design of a Deep Neural Network (DNN) based decoder that collectively decodes all the bits spatially encoded in a display frame, without attempting to precisely isolate the pixels associated with each encoded bit. In addition, DeepLight supports imperceptible encoding by selectively modulating the intensity of only the Blue channel, and provides reasonably accurate screen extraction (IoU values \u00e2\u0089\u00a5 83%) by using state-of-the-art object detection DNN pipelines. We show that a fully functional DeepLight system is able to robustly achieve high decoding accuracy (frame error rate < 0.2) and moderately-high data goodput (\u00e2\u0089\u00a50.95 Kbps) using a human-held smartphone camera, even over larger screen-camera distances (~ 2m).", + "abstract": "The paper introduces a novel, holistic approach for robust Screen-Camera Communication (SCC), where video content on a screen is visually encoded in a human-imperceptible fashion and decoded by a camera capturing images of such screen content. We first show that state-of-the-art SCC techniques have two key limitations for in-the-wild deployment: (a) the decoding accuracy drops rapidly under even modest screen extraction errors from the captured images, and (b) they generate perceptible flickers on common refresh rate screens even with minimal modulation of pixel intensity. To overcome these challenges, we introduce DeepLight, a system that incorporates machine learning (ML) models in the decoding pipeline to achieve humanly-imperceptible, moderately high SCC rates under diverse real-world conditions. DeepLight's key innovation is the design of a Deep Neural Network (DNN) based decoder that collectively decodes all the bits spatially encoded in a display frame, without attempting to precisely isolate the pixels associated with each encoded bit. 
In addition, DeepLight supports imperceptible encoding by selectively modulating the intensity of only the Blue channel, and provides reasonably accurate screen extraction (IoU values \u2265 83%) by using state-of-the-art object detection DNN pipelines. We show that a fully functional DeepLight system is able to robustly achieve high decoding accuracy (frame error rate < 0.2) and moderately-high data goodput (\u22650.95 Kbps) using a human-held smartphone camera, even over larger screen-camera distances (~ 2m).", "authors": [ "Vu Tran", "Gihan Jayatilaka", @@ -32389,7 +32389,7 @@ "title": "Data Mining System for Predicting a Winning Cricket Team", "venue": "2021 IEEE 16th International Conference on Industrial and Information Systems (ICIIS)", "year": "2021", - "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u00e2\u0080\u0099 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", + "abstract": "Cricket is a two-team outdoor game that originated in England around the 19th century. This is played in 3 forms as twenty20, ODI, and Test matches. Due to the availability of data, researchers have been able to do statistical analysis of data for pattern recognition, to find factors affecting the game, and for outcome prediction. But due to the high uncertainty of the game, it has become very difficult to come up with a stable and accurate model. The outcome model also depends on the number of overs, match type, time period, and player combination among many other factors. This research focuses only on the ODI matches that were played between ICC full members; Australia, West Indies, Sri Lanka, Bangladesh, New Zealand, Ireland, India, Zimbabwe, Afghanistan, England, South Africa, and Pakistan. This outcome prediction is based on players\u2019 performances in a team and some features specific to the team and the match. The individual performance of batsmen, bowlers, and fielders are analyzed separately considering all-time ODI data. The combined performance of batsmen and bowlers was analyzed, and compared with individual performances using statistical methods. Association rule mining was used to find frequent winning player combinations. 
Match data from 2015 to 2020 were considered for the combined performance analysis and outcome prediction. For all these predictions we used data mining and machine learning techniques.", "authors": [ "Dinithi Hasanika", "Roshani Dilhara", @@ -33906,7 +33906,7 @@ ], "access management": [ { - "title": "BAT\u00e2\u0080\u0094Block Analytics Tool Integrated with Blockchain Based IoT Platform", + "title": "BAT\u2014Block Analytics Tool Integrated with Blockchain Based IoT Platform", "venue": "Electronics", "year": "2020", "abstract": "The Internet of Things (IoT) is the novel paradigm of connectivity and the driving force behind state-of-the-art applications and services. However, the exponential growth of the number of IoT devices and services, their distributed nature, and scarcity of resources has increased the number of security and privacy concerns ranging from the risks of unauthorized data alterations to the potential discrimination enabled by data analytics over sensitive information. Thus, a blockchain based IoT-platform is introduced to address these issues. Built upon the tamper-proof architecture, the proposed access management mechanisms ensure the authenticity and integrity of data. Moreover, a novel approach called Block Analytics Tool (BAT), integrated with the platform is proposed to analyze and make predictions on data stored on the blockchain. BAT enables the data-analysis applications to be developed using the data stored in the platform in an optimized manner acting as an interface to off-chain processing. A pharmaceutical supply chain is used as the use case scenario to show the functionality of the proposed platform. Furthermore, a model to forecast the demand of the pharmaceutical drugs is investigated using a real-world data set to demonstrate the functionality of BAT. Finally, the performance of BAT integrated with the platform is evaluated.", @@ -34162,7 +34162,7 @@ "title": "New approach to practical leakage-resilient public-key cryptography", "venue": "Journal of Mathematical Cryptology", "year": "2020", - "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u00e2\u0080\u0093 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. 
Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", + "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u2013 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", "authors": [ "Suvradip Chakraborty", "Janaka Alawatugoda", @@ -34375,7 +34375,7 @@ ], "blockchain": [ { - "title": "BAT\u00e2\u0080\u0094Block Analytics Tool Integrated with Blockchain Based IoT Platform", + "title": "BAT\u2014Block Analytics Tool Integrated with Blockchain Based IoT Platform", "venue": "Electronics", "year": "2020", "abstract": "The Internet of Things (IoT) is the novel paradigm of connectivity and the driving force behind state-of-the-art applications and services. However, the exponential growth of the number of IoT devices and services, their distributed nature, and scarcity of resources has increased the number of security and privacy concerns ranging from the risks of unauthorized data alterations to the potential discrimination enabled by data analytics over sensitive information. Thus, a blockchain based IoT-platform is introduced to address these issues. Built upon the tamper-proof architecture, the proposed access management mechanisms ensure the authenticity and integrity of data. Moreover, a novel approach called Block Analytics Tool (BAT), integrated with the platform is proposed to analyze and make predictions on data stored on the blockchain. 
BAT enables the data-analysis applications to be developed using the data stored in the platform in an optimized manner acting as an interface to off-chain processing. A pharmaceutical supply chain is used as the use case scenario to show the functionality of the proposed platform. Furthermore, a model to forecast the demand of the pharmaceutical drugs is investigated using a real-world data set to demonstrate the functionality of BAT. Finally, the performance of BAT integrated with the platform is evaluated.", @@ -34714,7 +34714,7 @@ ], "data analytics": [ { - "title": "BAT\u00e2\u0080\u0094Block Analytics Tool Integrated with Blockchain Based IoT Platform", + "title": "BAT\u2014Block Analytics Tool Integrated with Blockchain Based IoT Platform", "venue": "Electronics", "year": "2020", "abstract": "The Internet of Things (IoT) is the novel paradigm of connectivity and the driving force behind state-of-the-art applications and services. However, the exponential growth of the number of IoT devices and services, their distributed nature, and scarcity of resources has increased the number of security and privacy concerns ranging from the risks of unauthorized data alterations to the potential discrimination enabled by data analytics over sensitive information. Thus, a blockchain based IoT-platform is introduced to address these issues. Built upon the tamper-proof architecture, the proposed access management mechanisms ensure the authenticity and integrity of data. Moreover, a novel approach called Block Analytics Tool (BAT), integrated with the platform is proposed to analyze and make predictions on data stored on the blockchain. BAT enables the data-analysis applications to be developed using the data stored in the platform in an optimized manner acting as an interface to off-chain processing. A pharmaceutical supply chain is used as the use case scenario to show the functionality of the proposed platform. Furthermore, a model to forecast the demand of the pharmaceutical drugs is investigated using a real-world data set to demonstrate the functionality of BAT. Finally, the performance of BAT integrated with the platform is evaluated.", @@ -34951,7 +34951,7 @@ "title": "Standard model leakage-resilient authenticated key exchange using inner-product extractors", "venue": "Designs, Codes and Cryptography", "year": "2022", - "abstract": "With the development of side-channel attacks, a necessity arises to invent authenticated key exchange protocols in a leakage-resilient manner. Constructing authenticated key exchange protocols using existing cryptographic schemes is an effective method, as such construction can be instantiated with any appropriate scheme in a way that the formal security argument remains valid. In parallel, constructing authenticated key exchange protocols that are proven to be secure in the standard model is more preferred as they rely on real-world assumptions. In this paper, we present a Diffie\u00e2\u0080\u0093Hellman-style construction of a leakage-resilient authenticated key exchange protocol, that can be instantiated with any CCLA2-secure public-key encryption scheme and a function from the pseudo-random function family. Our protocol is proven to be secure in the standard model assuming the hardness of the decisional Diffie\u00e2\u0080\u0093Hellman problem. 
Furthermore, it is resilient to continuous partial leakage of long-term secret keys, that happens even after the session key is established, while satisfying the security features defined by the eCK security model.", + "abstract": "With the development of side-channel attacks, a necessity arises to invent authenticated key exchange protocols in a leakage-resilient manner. Constructing authenticated key exchange protocols using existing cryptographic schemes is an effective method, as such construction can be instantiated with any appropriate scheme in a way that the formal security argument remains valid. In parallel, constructing authenticated key exchange protocols that are proven to be secure in the standard model is more preferred as they rely on real-world assumptions. In this paper, we present a Diffie\u2013Hellman-style construction of a leakage-resilient authenticated key exchange protocol, that can be instantiated with any CCLA2-secure public-key encryption scheme and a function from the pseudo-random function family. Our protocol is proven to be secure in the standard model assuming the hardness of the decisional Diffie\u2013Hellman problem. Furthermore, it is resilient to continuous partial leakage of long-term secret keys, that happens even after the session key is established, while satisfying the security features defined by the eCK security model.", "authors": [ "Janaka Alawatugoda", "Tatsuaki Okamoto" @@ -35147,7 +35147,7 @@ "title": "GPU accelerated adaptive banded event alignment for rapid comparative nanopore signal analysis", "venue": "BMC Bioinformatics", "year": "2020", - "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u00e2\u0088\u00bc3-5 \u00c3\u0097 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", + "abstract": "[BACKGROUND:] Nanopore sequencing enables portable, real-time sequencing applications, including point-of-care diagnostics and in-the-field genotyping. Achieving these outcomes requires efficient bioinformatic algorithms for the analysis of raw nanopore signal data. However, comparing raw nanopore signals to a biological reference sequence is a computationally complex task. 
The dynamic programming algorithm called Adaptive Banded Event Alignment (ABEA) is a crucial step in polishing sequencing data and identifying non-standard nucleotides, such as measuring DNA methylation. Here, we parallelise and optimise an implementation of the ABEA algorithm (termed f5c) to efficiently run on heterogeneous CPU-GPU architectures. [RESULTS:] By optimising memory, computations and load balancing between CPU and GPU, we demonstrate how f5c can perform \u223c3-5 \u00d7 faster than an optimised version of the original CPU-only implementation of ABEA in the Nanopolish software package. We also show that f5c enables DNA methylation detection on-the-fly using an embedded System on Chip (SoC) equipped with GPUs. [CONCLUSIONS:] Our work not only demonstrates that complex genomics analyses can be performed on lightweight computing systems, but also benefits High-Performance Computing (HPC). The associated source code for f5c along with GPU optimised ABEA is available at https://github.com/hasindu2008/f5c.", "authors": [ "Hasindu Gamaarachchi", "Chun Wai Lam", @@ -36100,7 +36100,7 @@ "title": "New approach to practical leakage-resilient public-key cryptography", "venue": "Journal of Mathematical Cryptology", "year": "2020", - "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u00e2\u0080\u0093 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", + "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. 
We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u2013 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", "authors": [ "Suvradip Chakraborty", "Janaka Alawatugoda", @@ -36316,7 +36316,7 @@ "title": "New approach to practical leakage-resilient public-key cryptography", "venue": "Journal of Mathematical Cryptology", "year": "2020", - "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u00e2\u0080\u0093 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. 
We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", + "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u2013 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", "authors": [ "Suvradip Chakraborty", "Janaka Alawatugoda", @@ -36525,7 +36525,7 @@ "title": "New approach to practical leakage-resilient public-key cryptography", "venue": "Journal of Mathematical Cryptology", "year": "2020", - "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u00e2\u0080\u0093 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. 
Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", + "abstract": "We present a new approach to construct several leakage-resilient cryptographic primitives, including leakage-resilient public-key encryption (PKE) schemes, authenticated key exchange (AKE) protocols and low-latency key exchange (LLKE) protocols. To this end, we introduce a new primitive called leakage-resilient non-interactive key exchange (LR-NIKE) protocol. We introduce an appropriate security model for LR-NIKE protocols in the bounded memory leakage (BML) settings. We then show a secure construction of the LR-NIKE protocol in the BML setting that achieves an optimal leakage rate, i.e., 1 \u2013 o(1). Our construction of LR-NIKE requires a minimal use of a leak-free hardware component. We argue that the use of such a leak-free hardware component seems to be unavoidable in any construction of an LR-NIKE protocol, even in the BML setting. Finally, we show how to construct the aforementioned leakage-resilient primitives from such an LR-NIKE protocol as summarized below. All these primitives also achieve the same (optimal) leakage rate as the underlying LR-NIKE protocol. We show how to construct a leakage-resilient (LR) IND-CCA-2-secure PKE scheme in the BML model generically from a bounded LR-NIKE (BLR-NIKE) protocol. Our construction of LR-IND-CCA-2 secure PKE differs significantly from the state-of-the-art constructions of these primitives, which mainly use hash proof techniques to achieve leakage resilience. Moreover, our transformation preserves the leakage-rate of the underlying BLR-NIKE protocol. We introduce a new leakage model for AKE protocols, in the BML setting, and present a leakage-resilient AKE protocol construction from the LR-NIKE protocol. We introduce the first-ever leakage model for LLKE protocols in the BML setting and the first construction of such a leakage-resilient LLKE from the LR-NIKE protocol.", "authors": [ "Suvradip Chakraborty", "Janaka Alawatugoda", @@ -37080,7 +37080,7 @@ ], "smart contracts": [ { - "title": "BAT\u00e2\u0080\u0094Block Analytics Tool Integrated with Blockchain Based IoT Platform", + "title": "BAT\u2014Block Analytics Tool Integrated with Blockchain Based IoT Platform", "venue": "Electronics", "year": "2020", "abstract": "The Internet of Things (IoT) is the novel paradigm of connectivity and the driving force behind state-of-the-art applications and services. However, the exponential growth of the number of IoT devices and services, their distributed nature, and scarcity of resources has increased the number of security and privacy concerns ranging from the risks of unauthorized data alterations to the potential discrimination enabled by data analytics over sensitive information. Thus, a blockchain based IoT-platform is introduced to address these issues. Built upon the tamper-proof architecture, the proposed access management mechanisms ensure the authenticity and integrity of data. 
Moreover, a novel approach called Block Analytics Tool (BAT), integrated with the platform is proposed to analyze and make predictions on data stored on the blockchain. BAT enables the data-analysis applications to be developed using the data stored in the platform in an optimized manner acting as an interface to off-chain processing. A pharmaceutical supply chain is used as the use case scenario to show the functionality of the proposed platform. Furthermore, a model to forecast the demand of the pharmaceutical drugs is investigated using a real-world data set to demonstrate the functionality of BAT. Finally, the performance of BAT integrated with the platform is evaluated.",