@article{hagh-etal-iot25,
author = {Hagh, Soheyl Faghir and Bourdeau, Jordan and Sober, Julia and Chertok, Rachael and Jepsen, Christopher and Amngostar, Parmida and Levine, Lucas and Forey, Casey and Xia, Tian and Skalka, Christian},
journal = {IEEE Internet of Things Journal},
title = {\textcolor{navy}{Embedded IoT System for Acoustic Precipitation Phase Partitioning via Edge ML and MFCCs}},
year = {2025},
pages = {1-1},
abstract = {Accurate and real-time detection of precipitation phases is essential for hydrological modeling, water resource management, and climate impact assessments. However, conventional methods struggle to distinguish precipitation phase near freezing and are unsuitable for distributed deployment in remote, complex terrain due to cost, size, and power constraints. In this work, we present an integrated acoustic sensing system that combines machine learning on the edge with acoustic sensing and Mel-Frequency Cepstral Coefficients (MFCC)-based feature extraction, all implemented on our designed edge device. The device is low-cost, with LAN and WAN capabilities for near real-time reporting, and can integrate multiple sensors for snow hydrology studies, in addition to detecting the precipitation phase. We present the design, fabrication, and validation of the Advanced Unified Rainfall and Atmospheric Monitoring (AURA) system, integrating sensors for temperature, humidity, ultrasonic snow depth, solar radiation, and wind velocity. AURA features an embedded machine learning algorithm that enables accurate classification of precipitation phases. Short-time Fourier Transform (STFT) and MFCCs are computed on the edge from recorded precipitation acoustics via novel Mel filter bank computation methods. Using support vector machine (SVM) and random forest (RF) classifiers, the RF model achieves testing accuracy of 97.35\% on simulated precipitation acoustics and 85.67\% in consolidated class (CC) environmental recordings. The SVM classifier achieves 98.07\% accuracy on simulated acoustics and 85.99\% on CC environmental recordings. The low-power AURA network uses a LoRa star topology with TDMA and an Iridium gateway for reliable data transfer from remote sites. SNR analysis and comprehensive field tests confirm and validate system performance.},
doi = {10.1109/JIOT.2025.3613138}
}
@inproceedings{skalka-near-esop25,
author = {Christian Skalka and Joseph P. Near},
title = {\textcolor{navy}{SMT-Boosted Security Types for Low-Level MPC}},
booktitle = {Proceedings of the European Symposium on Programming},
year = 2025,
abstract = {Secure Multi-Party Computation (MPC) is an important enabling technology for data privacy in modern distributed applications. We develop a new type theory to automatically enforce correctness, confidentiality, and integrity properties of protocols written in the \emph{Prelude/Overture} language framework. Judgements in the type theory are predicated on SMT verifications in a theory of finite fields, which supports precise and efficient analysis. Our approach is automated, compositional, scalable, and generalizes to arbitrary prime fields for data and key sizes.},
pdf = {https://arxiv.org/abs/2501.17824}
}
@inproceedings{skalka-near-ppdp24,
title = {\textcolor{navy}{Language-Based Security for Low-Level MPC}},
author = {Christian Skalka and Joseph P. Near},
year = {2024},
booktitle = {Proceedings of the ACM Conference on Principles and Practice of Declarative Programming},
abstract = {Secure Multi-Party Computation (MPC) is an important enabling technology for data privacy in modern distributed applications. Currently, proof methods for low-level MPC protocols are primarily manual and thus tedious and error-prone, and are also non-standardized and unfamiliar to most PL theorists. As a step towards better language support and language-based enforcement, we develop a new staged PL for defining a variety of low-level probabilistic MPC protocols. We also formulate a collection of confidentiality and integrity hyperproperties for our language model that are familiar from information flow, including conditional noninterference, gradual release, and robust declassification. We demonstrate their relation to standard MPC threat models of passive and malicious security, and how they can be leveraged in security verification of protocols. To prove these properties we develop automated tactics in $\mathbb{F}_2$ that can be integrated with separation logic-style reasoning.},
pdf = {https://arxiv.org/abs/2407.16504}
}
@inproceedings{stevens-etal-usenix22,
author = {Timothy Stevens and Christian Skalka and Christelle Vincent and John Ring and Samuel Clark and Joseph Near},
title = {\textcolor{navy}{Efficient Differentially Private Secure Aggregation for Federated Learning via Hardness of Learning with Errors}},
booktitle = {31st {USENIX} Security Symposium, {USENIX} Security 2022},
publisher = {{USENIX} Association},
year = {2022},
abstract = {Federated machine learning leverages edge computing to develop models from network user data, but privacy in federated learning remains a major challenge. Techniques using differential privacy have been proposed to address this, but bring their own challenges: many require a trusted third party or else add too much noise to produce useful models. Recent advances in \emph{secure aggregation} using multiparty computation eliminate the need for a third party, but are computationally expensive especially at scale. We present a new federated learning protocol that leverages a novel differentially private, malicious secure aggregation protocol based on techniques from Learning With Errors. Our protocol outperforms current state-of-the-art techniques, and empirical results show that it scales to a large number of parties, with optimal accuracy for any differentially private federated learning scheme.},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/stevens-etal-usenix22.pdf}
}
@article{10.1145/3461462,
author = {Ring, John H. and Van Oort, Colin M. and Durst, Samson and White, Vanessa and Near, Joseph P. and Skalka, Christian},
title = {\textcolor{navy}{Methods for Host-Based Intrusion Detection with Deep Learning}},
year = {2021},
issue_date = {December 2021},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {2},
number = {4},
issn = {2692-1626},
url = {https://doi.org/10.1145/3461462},
doi = {10.1145/3461462},
abstract = {Host-based Intrusion Detection Systems (HIDS) automatically detect events that indicate compromise by adversarial applications. HIDS are generally formulated as analyses of sequences of system events such as bash commands or system calls. Anomaly-based approaches to HIDS leverage models of normal (a.k.a. baseline) system behavior to detect and report abnormal events and have the advantage of being able to detect novel attacks. In this article, we develop a new method for anomaly-based HIDS using deep learning predictions of sequence-to-sequence behavior in system calls. Our proposed method, called the ALAD algorithm, aggregates predictions at the application level to detect anomalies. We investigate the use of several deep learning architectures, including WaveNet and several recurrent networks. We show that ALAD empowered with deep learning significantly outperforms previous approaches. We train and evaluate our models using an existing dataset, ADFA-LD, and a new dataset of our own construction, PLAID. As deep learning models are black box in nature, we use an alternate approach, allotaxonographs, to characterize and understand differences in baseline vs. attack sequences in HIDS datasets such as PLAID.},
journal = {Digital Threats: Research and Practice},
month = {oct},
articleno = {26},
numpages = {29},
keywords = {deep learning, system calls, Host-based intrusion detection systems},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/ring-etal-dtrap21.pdf}
}
@article{info:doi/10.2196/16605,
author = {Stevens, Timothy
and McGinnis, Ryan S
and Hewgill, Blake
and Choquette, Rebecca H
and Tourville, Timothy W
and Harvey, Jean
and Lachapelle, Richard
and Beynnon, Bruce D
and Toth, Michael J
and Skalka, Christian},
title = {\textcolor{navy}{A Cyber-Physical System for Near Real-Time Monitoring of At-Home Orthopedic Rehabilitation and Mobile-Based Provider-Patient Communications to Improve Adherence: Development and Formative Evaluation}},
journal = {JMIR Hum Factors},
year = {2020},
month = {May},
day = {11},
volume = {7},
number = {2},
pages = {e16605},
keywords = {device use tracking; internet of things; neuromuscular electrical stimulation; exercise; smart devices; mHealth; rehabilitation; mobile health; digital health},
abstract = {Background: Knee extensor muscle performance is reduced after lower extremity trauma and orthopedic surgical interventions. At-home use of neuromuscular electrical stimulation (NMES) may improve functional recovery, but adherence to at-home interventions is low. Greater benefits from NMES may be realized with closer monitoring of adherence to at-home prescriptions and more frequent patient-provider interactions. Objective: This study aimed to develop a cyber-physical system to monitor at-home adherence to NMES prescription and facilitate patient-provider communications to improve adherence in near real time. Methods: The RehabTracker cyber-physical system was developed to accomplish this goal and comprises four components: (1) hardware modifications to a commercially available NMES therapy device to monitor device use and provide Bluetooth functionality; (2) an iPhone Operating System--based mobile health (mHealth) app that enables patient-provider communications in near real time; (3) a clinician portal to allow oversight of patient adherence with device use; and (4) a back-end server to store data, enable adherence analysis, and send automated push notifications to the patient. These four elements were designed to be fully compliant with the Health Insurance Portability and Accountability Act. The system underwent formative testing in a cohort of patients following anterior cruciate ligament rupture (n=7) to begin to assess face validity. Results: Compared with the NMES device software--tracked device use, the RehabTracker system recorded 83{\%} (40/48) of the rehabilitation sessions, with 100{\%} (32/32) of all sessions logged by the system in 4 out of 7 patients. In patients for whom tracking of automated push notifications was enabled, 100{\%} (29/29) of the push notifications sent by the back-end server were received by the patient. Process, hardware, and software issues contributing to these inaccuracies are detailed. Conclusions: RehabTracker represents a promising mHealth app for tracking and improving adherence with at-home NMES rehabilitation programs and warrants further refinement and testing. },
issn = {2292-9495},
doi = {10.2196/16605},
url = {http://humanfactors.jmir.org/2020/2/e16605/}
}
@inproceedings{skalka-etal-csf20,
author = {Christian Skalka and David Darais and Trent Jaeger and Frank Capobianco},
title = {\textcolor{navy}{Types and Abstract Interpretation for Authorization Hook Advice}},
booktitle = {IEEE Computer Security Foundations Symposium (CSF)},
year = 2020,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-etal-csf20.pdf},
abstract = {Authorization hooks are access control checks that
prevent unauthorized principals from interacting
with some protected resource, and are used
extensively in critical software such as operating
systems, middleware, and server programs. They are
often intended to mediate information flow between
subjects (e.g., file owners), but typically in an
ad-hoc manner. In this paper we present a static
type and effect system for detecting whether
authorization hooks in programs properly defend
against undesired information flow between subjects.
A significant novelty of our approach is an
integrated abstract interpretation-based tool that
guides system clients through the information flow
consequences of access control policy decisions.}
}
@article{skalka-amir-clark-jcs19,
author = {Christian Skalka and Sepehr Amir-Mohammadian and Samuel Clark},
title = {\textcolor{navy}{Maybe Tainted Data: Theory and a Case Study}},
journal = {Journal of Computer Security},
year = 2020,
volume = 28,
number = 3,
pages = {295-335},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-amir-clark-jcs19.pdf},
abstract = { Dynamic taint analysis is often used as a defense
against low-integrity data in applications with
untrusted user interfaces. An important example is
defense against XSS and injection attacks in
programs with web interfaces. Data sanitization is
commonly used in this context, and can be treated as
a precondition for endorsement in a dynamic
integrity taint analysis. However, sanitization is
often incomplete in practice. We develop a model of
dynamic integrity taint analysis for Java that
addresses imperfect sanitization with an in-depth
approach. To avoid false positives, results of
sanitization are endorsed for access control (aka
prospective security), but are tracked and logged
for auditing and accountability (aka retrospective
security). We show how this heterogeneous
prospective/retrospective mechanism can be specified
as a uniform policy, separate from code. We then use
this policy to establish correctness conditions for
a program rewriting algorithm that instruments code
for the analysis. These conditions synergize our
previous work on the semantics of audit logging with
\emph{explicit integrity}, which is an analogue of
noninterference for taint analysis. A technical
contribution of our work is the extension of
explicit integrity to a high-level functional
language setting with structured data, vs.~previous
systems that only address low level languages with
unstructured data. Our approach considers
endorsement, which is crucial to addressing
sanitization. An implementation of our rewriting
algorithm is presented that hardens the OpenMRS
medical records software system with in-depth taint
analysis, along with an empirical evaluation of the
overhead imposed by instrumentation. Our results
show that this instrumentation is practical.}
}
@inproceedings{skalka-etal-ccs19,
author = {Christian Skalka and John Ring and David Darais and Minseok Kwon and Sahil Gupta and Kyle Diller and Steffen Smolka and Nate Foster},
title = {\textcolor{navy}{Proof Carrying Network Code}},
booktitle = {ACM Conference on Computer and Communications Security (CCS)},
year = 2019,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-etal-ccs19.pdf},
abstract = {Computer networks often serve as the first line of
defense against malicious attacks. Although there
are a growing number of software defined networking
(SDN) tools for defining and enforcing security
policies, most assume a single administrative domain
and are unable to handle the challenges that arise
in networks that could beneficially be programmed by
multiple administrative domains. For example,
consumers may want to allow their home IoT
networks to be configured by device vendors, which
raises security and privacy concerns. In this
paper we propose a framework called Proof Carrying
Network Code (PCNC) for specifying and enforcing
security in SDNs with interacting administrative
domains. Like Proof Carrying Authorization (PCA),
PCNC provides methods for authorizing domains for
network reprogramming, and like Proof Carrying Code
(PCC), PCNC provides methods for enforcing desired
behavior of network programs. We develop theoretical
foundations for PCNC and evaluate it in simulated
and real network settings, in a case study that
considers security in IoT networks for at-home
health monitoring. }
}
@article{wshah-skalka-price-jmir19,
author = {Safwan Wshah and Christian Skalka and Matthew Price},
title = {\textcolor{navy}{Predicting Risk of Posttraumatic Stress Disorder Symptomology Using Machine Learning}},
journal = {JMIR Mental Health},
year = 2019,
volume = 6,
number = 7,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/wshah-skalka-price-jmir19.pdf},
abstract = { A majority of adults in the United States are exposed
to a potentially traumatic event but only a handful
go on to develop impairing mental health conditions
such as posttraumatic stress disorder (PTSD).
Identifying those at elevated risk
shortly after trauma exposure is a clinical
challenge. The aim of this study was to develop
computational methods to more effectively identify
at-risk patients and, thereby, support better early
interventions. We proposed machine
learning (ML) induction of models to automatically
predict elevated PTSD symptoms in patients 1 month
after a trauma, using self-reported symptoms from
data collected via smartphones. We show
that an ensemble model accurately predicts elevated
PTSD symptoms, with an area under the curve (AUC) of
.85, using a bag of support vector machines, naive
Bayes, logistic regression, and random forest
algorithms. Furthermore, we show that only 7
self-reported items (features) are needed to obtain
this AUC. Most importantly, we show that accurate
predictions can be made 10 to 20 days posttrauma.
These results suggest that simple
smartphone-based patient surveys, coupled with
automated analysis using ML-trained models, can
identify those at risk for developing elevated PTSD
symptoms and thus target them for early
intervention.}
}
@article{price-etal-ejp18,
author = {Price, M. and van Stolk-Cooke, K. and Legrand, A. C. and Brier, Z. M. F. and Ward, H. L. and Connor, J. P. and Gratton, J. and Freeman, K. and Skalka, C.},
title = {\textcolor{navy}{Implementing Assessments via Mobile During the Acute Posttrauma Period: Feasibility, Acceptability, and Strategies to Improve Response Rates}},
journal = {European Journal of Psychotraumatology},
year = 2018,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/price-etal-ejp18.pdf},
abstract = {PTSD is posited to develop in the acute posttrauma
period. Few studies have examined psychopathology
symptoms within this period due to the demands on
individuals 10 in the first month after a
trauma. Mobile devices can overcome these
barriers. The feasibility of using mobile devices
for this purpose, however, is unclear. The present
study evaluated the acceptability of administering
PTSD symptom assessments via a mobile application
throughout the acute posttrauma period. The study
suggests that using mobile devices to monitor mental
health symptoms during the acute posttrauma period
is feasible and acceptable. Strategies are needed to
determine how to best take advantage of these data
once collected.}
}
@inproceedings{capobianco-etal-tapp17,
author = {Frank Capobianco and Christian Skalka and Trent Jaeger},
title = {\textcolor{navy}{Tracking the Provenance of Access Control Decisions}},
booktitle = {International Workshop on Theory and Practice of Provenance (TaPP)},
year = 2017,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/capobianco-etal-tapp17.pdf},
abstract = {Access control is a necessary defense to protect
security-sensitive operations. Unfortunately, access
control mechanisms are implemented manually in
practice, which can lead to errors that can be
exploited. Prior work aims to find such errors
through static analysis, but the correctness of
access control enforcement depends on runtime
factors, such as the access control policies
enforced and adversary control of the program
inputs. As a result, we propose to apply provenance
tracking to find flaws in access control
enforcement. To do so, we track decisions made in
deploying access control enforcement to enable
detection of flaws. We have developed AutoProv, a
Java bytecode analysis tool capable of retrofitting
legacy Java applications with provenance hooks. We
utilize AutoProv to add provenance hooks at all
locations that either may require access control
enforcement or may impact access control policy
decisions. We evaluate our tool on OpenMRS, an open
source medical record system, which is used across
the globe to manage sensitive data for millions of
patients.}
}
@inproceedings{petracca-etal-sacmat17,
author = {Giuseppe Petracca and Frank Capobianco and Christian Skalka and Trent Jaeger},
title = {\textcolor{navy}{On Risk in Access Control Enforcement}},
booktitle = {ACM Symposium on Access Control Models and Technologies (SACMAT)},
year = 2017,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/petracca-etal-sacmat17.pdf},
abstract = {While we have long had principles describing how access
control enforcement should be implemented, such as
the reference monitor concept, imprecision in
access control mechanisms and access control
policies leads to risks that may enable
exploitation. In practice, least privilege access
control policies often allow information flows that may
enable exploits. In addition, the implementation of
access control mechanisms often tries to balance
security with ease of use implicitly (e.g., with
respect to determining where to place authorization
hooks) and approaches to tighten access control, such
as accounting for program context, are ad hoc. In
this paper, we define four types of risks in access
control enforcement and explore possible approaches
and challenges in tracking those types of risks. In
principle, we advocate runtime tracking to produce
risk estimates for each of these types of risk. To
better understand the potential of risk estimation for
authorization, we propose risk estimate functions
for each of the four types of risk, finding that
benign program deployments accumulate risks in each
of the four areas for ten Android programs
examined. As a result, we find that tracking of
relative risk may be useful for guiding changes to
security choices, such as authorized unsafe
operations or placement of authorization checks,
when risk differs from that expected.}
}
@inproceedings{basu-etal-ancs17,
author = {Shrutarshi Basu and Nate Foster and Hossein Hojjat and Paparao Palacharla and Christian Skalka and Xi Wang},
title = {\textcolor{navy}{Life on the Edge: Unraveling Policies into Configurations}},
booktitle = {ACM/IEEE Symposium on Architectures for Networking and Communications Systems (ANCS)},
year = 2017,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/basu-etal-ancs17.pdf},
abstract = {Current frameworks for network programming assume that
the network comprises a set of homogeneous devices
that can be rapidly reconfigured in response to
changing policies and network conditions.
Unfortunately, these assumptions are incompatible
with the realities of modern networks, which contain
legacy devices that offer diverse functionality and
can only be reconfigured slowly. Additionally,
network service providers need to walk a fine line
between providing flexibility to users, and
maintaining the integrity and reliability of their
core networks. These issues are particularly evident
in optical networks, which are used by ISPs and WANs
and provide high bandwidth at the cost of limited
flexibility and long reconfiguration times. This
paper presents a different approach to implementing
high-level policies, by pushing functionality to the
edge and using the core merely for transit. Building
on the NetKAT framework and leveraging linear
programming solvers, we develop techniques for
analyzing and transforming policies into
configurations that can be installed at the edge of
the network. Furthermore, our approach can be
extended to incorporate constraints that are crucial
in the optical domain, such as path constraints. We
develop a working implementation using off-the-shelf
solvers and evaluate our approach on realistic
optical topologies. }
}
@article{price-etal-jtbs17,
author = {Price, M. and van Stolk-Cooke, K. and Ward, H. L. and O'Keefe, M. and Gratton, J. and Skalka, C. and Freeman, K.},
title = {\textcolor{navy}{Tracking Post-Trauma Psychopathology Using Mobile Applications: A Usability Study}},
journal = {Journal of Technology in Behavioral Science},
year = 2017,
pages = {1--8},
issn = {2366-5963},
doi = {10.1007/s41347-016-0008-9},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/price-etal-jtbs17.pdf},
abstract = { Trauma exposure markedly increases risk for
psychopathology including posttraumatic stress
disorder (PTSD). Understanding the course by which
PTSD develops after a traumatic event is critical to
enhancing early intervention. Although prior work
has explored the course of PTSD symptoms in the
subsequent months, relatively few studies have
explored the course of symptoms in the acute
posttrauma period, defined as the 30 days after a
traumatic event. A key challenge to conducting such
studies is the lack of efficient means to collect
data that does not impose significant burden on the
participant during this time. The present study
evaluated the use of a mobile phone application to
collect symptom data during the acute posttrauma
period. Data was obtained from 23 individuals who
experienced a criterion A traumatic event and were
recruited from the emergency department of a level-1
trauma center. Participants completed 44.93\% of
daily assessments across a 30-day period. Response
rates were uncorrelated with PTSD symptoms or
depression symptoms at 1- and 3-month
posttrauma. Participants reported that the surveys
were moderately helpful and posed minimal
burden. These findings suggest that mobile
applications can be used to learn about the course
of posttrauma recovery.}
}
@inproceedings{amir-skalka-plas16,
author = {Sepehr Amir-Mohammadian and Christian Skalka},
title = {\textcolor{navy}{In-Depth Enforcement of Dynamic Integrity Taint Analysis}},
booktitle = {ACM Programming Languages and Security Workshop (PLAS)},
year = 2016,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/amir-skalka-plas16.pdf},
abstract = {Dynamic taint analysis can be used as a defense against
low-integrity data in applications with untrusted
user interfaces. An important example is defense
against XSS and injection attacks in programs with
web interfaces. Data sanitization is commonly used
in this context, and can be treated as a
precondition for endorsement in a dynamic integrity
taint analysis. However, sanitization is often
incomplete in practice. We develop a model of
dynamic integrity taint analysis for Java that
addresses imperfect sanitization with an
\emph{in-depth} approach. To avoid false positives,
results of sanitization are endorsed for access
control (aka prospective security), but are tracked
and logged for auditing and accountability (aka
retrospective security). We show how this
heterogeneous prospective/retrospective mechanism
can be specified as a uniform policy, separate from
code. We then use this policy to establish
correctness conditions for a program rewriting
algorithm that instruments code for the analysis.
The rewriting itself is a model of existing,
efficient Java taint analysis tools.}
}
@inproceedings{kriegman-etal-ppsn16,
author = {Sam Kriegman and Marcin Szubert and Josh Bongard and Christian Skalka},
title = {\textcolor{navy}{Evolving Spatially Aggregated Features From Satellite Imagery for Regional Modeling}},
booktitle = {International Conference on Parallel Problem Solving from Nature (PPSN)},
year = 2016,
note = {Nominated for Best Paper Award.},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/kriegman-etal-ppsn16.pdf},
abstract = { Satellite imagery and remote sensing provide
explanatory variables at relatively high resolutions
for modeling geospatial phenomena, yet regional
summaries are often desirable for analysis and
actionable insight. In this paper, we propose a
novel method of inducing spatial aggregations as a
component of the machine learning process, yielding
regional model features whose construction is driven
by model prediction performance rather than prior
assumptions. Our results demonstrate that Genetic
Programming is particularly well suited to this type
of feature construction because it can automatically
synthesize appropriate aggregations, as well as
better incorporate them into regression models
compared to other regression models we tested. In
our experiments we consider a specific problem
instance and real-world dataset relevant to
predicting snow properties in high-mountain Asia. }
}
@incollection{yousefi-skalka-bongard-ciwsn16,
author = {Afsoon Yousefi and Christian Skalka and Josh Bongard},
title = {\textcolor{navy}{A Genetic Programming Approach to Cost-Sensitive Control in Wireless Sensor Networks}},
booktitle = {Computational Intelligence in Wireless Sensor Networks: Recent Advances and Future Challenges},
year = 2016,
publisher = {Springer International Publishing},
pages = {1--31},
isbn = {978-3-319-47715-2},
doi = {10.1007/978-3-319-47715-2_1},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/yousefi-skalka-bongard-ciwsn16.pdf},
abstract = { In some wireless sensor network applications, multiple
sensors can be used to measure the same variable,
while differing in their sampling \emph{cost}, for
example in their power requirements. This raises the
problem of automatically controlling heterogeneous
sensor suites in wireless sensor network
applications, in a manner that balances cost and
accuracy of sensors. We apply genetic programming
(GP) to this problem, considering two basic
approaches. First, we construct a hierarchy of
models, where increasing levels in the hierarchy use
sensors of increasing cost. If a model that polls
low cost sensors exhibits too much prediction
uncertainty, the burden of prediction is
automatically transferred to a higher level model
using more expensive sensors. Second, we train
models with cost as an optimization objective,
called non-hierarchical models, that use
conditionals to automatically select sensors based
on both cost and accuracy. We compare these
approaches in a setting where the available budget
for sampling is considered to remain constant, and
in a setting where the system is sensitive to a
fluctuating budget, for example available battery
power. We show that in both settings, for
increasingly challenging datasets, hierarchical
models make predictions with equivalent accuracy
yet lower cost than non-hierarchical models. }
}
@inproceedings{amir-chong-skalka-post16,
author = {Sepehr Amir-Mohammadian and Stephen Chong and Christian Skalka},
title = {\textcolor{navy}{Correct Audit Logging: Theory and Practice}},
booktitle = {Principles of Security and Trust (POST)},
year = 2016,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/amir-chong-skalka-post16.pdf},
abstract = { Retrospective security has become increasingly
important to the theory and practice of cyber
security, with auditing a crucial component of
it. However, in systems where auditing is used,
programs are typically instrumented to generate
audit logs using manual, ad-hoc strategies. This is
a potential source of error even if log analysis
techniques are formal, since the relation of the log
itself to program execution is unclear. This paper
focuses on provably correct program rewriting
algorithms for instrumenting formal logging
specifications. Correctness guarantees that the
execution of an instrumented program produces sound
and complete audit logs, properties defined by an
information containment relation between logs and
the program's logging semantics. We also propose a
program rewriting approach to instrumentation for
audit log generation, in a manner that guarantees
correct log generation even for untrusted
programs. As a case study, we develop such a tool
for OpenMRS, a popular medical records management
system, and consider instrumentation of break the
glass policies. }
}
@article{price-etal-jmir15,
author = {Matthew Price and Tyler J. Sawyer and Madison Harris and Christian Skalka},
title = {\textcolor{navy}{Usability Evaluation of a Mobile Monitoring System to Assess Symptoms After a Traumatic Injury: A Mixed Method Study.}},
journal = {JMIR Mental Health},
year = {2016},
month = {Jan},
day = {11},
volume = {3},
number = {1},
pages = {e3},
doi = {10.2196/mental.5023},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/price-etal-jmir15.pdf},
abstract = { Victims of trauma are at high risk for mental health
conditions such as posttraumatic stress disorder and
depression. Regular assessment of mental health
symptoms in the post-trauma period is necessary to
identify those at greatest risk and provide
treatment. The multiple demands of the acute
post-trauma period present numerous barriers to such
assessments. Mobile applications are a method by
which to overcome these barriers in order to
regularly assess symptoms, identify those at risk,
and connect patients to needed services. The current
study conducted a usability evaluation of a system
to monitor mental health symptoms after a
trauma. The system was developed to promote ease of
use and facilitate quick transmission of data. The
results of the current study indicate that a mobile
application to monitor post-trauma mental health
symptoms would be well received by
victims. Personalized feedback to the user was
identified as critical to promote the usability of
the software. }
}
@inproceedings{amir-chong-skalka-law15,
author = {Sepehr Amir-Mohammadian and Stephen Chong and Christian Skalka},
title = {\textcolor{navy}{Foundations for Auditing Assurance}},
booktitle = {Layered Assurance Workshop (LAW)},
year = 2015,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/amir-chong-skalka-law15.pdf},
abstract = { Retrospective security is an important element of
layered security systems. Auditing is central to the
theory and practice of retrospective security.
However, in systems where auditing is used, programs
are typically instrumented to generate audit logs
using manual, ad-hoc strategies. This is a potential
source of error even if log auditing techniques are
formal, since the relation of the log itself to
program execution is unclear. This paper focuses on
provably correct program rewriting algorithms for
instrumenting formal logging
specifications. Correctness guarantees that
execution of an instrumented program produces sound
and complete audit logs, properties defined by an
information containment relation between logs and
the program's logging semantics. As an application
example, we consider auditing for break the glass
policies, wherein authorization is replaced by
auditing in emergency conditions. }
}
@inproceedings{zowj-bongard-skalka-gecco15,
author = {Afsoon Yousefi Zowj and Joshua C Bongard and Christian Skalka},
title = {\textcolor{navy}{A Genetic Programming Approach to Cost-Sensitive Control in Resource Constrained Sensor Systems}},
booktitle = {ACM Genetic and Evolutionary Computation Conference (GECCO)},
year = 2015,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/zowj-bongard-skalka-gecco15.pdf},
abstract = { Resource constrained sensor systems are an increasingly
attractive option in a variety of environmental
monitoring domains, due to continued improvements in
sensor technology. However, sensors for the same
measurement application can differ in terms of cost
and accuracy, while fluctuations in environmental
conditions can impact both application requirements
and available energy. This raises the problem of
automatically controlling heterogeneous sensor
suites in resource constrained sensor system
applications, in a manner that balances cost and
accuracy of available sensors. We present a method
that employs a hierarchy of model ensembles trained
by genetic programming (GP): if model ensembles that
poll low-cost sensors exhibit too much prediction
uncertainty, they automatically transfer the burden
of prediction to other GP-trained model ensembles
that poll more expensive and accurate sensors. We
show that, for increasingly challenging datasets,
this hierarchical approach makes predictions with
equivalent accuracy yet lower cost than a similar
yet non-hierarchical method in which a single
GP-generated model determines which sensors to poll
at any given time. Our results thus show that a
hierarchy of GP-trained ensembles can serve as a
control algorithm for heterogeneous sensor suites in
resource constrained sensor system applications that
balances cost and accuracy. }
}
@article{buckingham-skalka-bongard-jhydro15not,
author = {David Buckingham and Christian Skalka and Joshua Bongard},
title = {\textcolor{navy}{Inductive Learning of Snowpack Distribution Models for Improved Estimation of Areal Snow Water Equivalent}},
journal = {Journal of Hydrology},
year = 2015,
volume = 524,
pages = {311-325},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/buckingham-bongard-skalka-jhydro15.pdf},
abstract = { Infrastructure for the automatic collection of
single-point measurements of snow water equivalent
(SWE) is well-established. However, because SWE
varies significantly over space, the estimation of
SWE at the catchment scale based on a single-point
measurement is error-prone. We propose low-cost,
lightweight methods for near-real-time estimation of
mean catchment-wide SWE using existing
infrastructure, wireless sensor networks, and
machine learning algorithms. Because snowpack
distribution is known to be highly nonlinear, we
focus on genetic programming (GP), a nonlinear,
white-box, inductive machine learning algorithm.
Because we did not have access to near-real-time
catchment-scale SWE data, we used available data as
ground truth for machine learning in a set of
experiments that are successive approximations of
our goal of catchment-wide SWE estimation. First,
we used a history of maritime snowpack data
collected by manual snow courses as our ground truth
estimate of mean catchment SWE. Second, we used
distributed snow depth (SD) data collected
automatically by wireless sensor networks. Thus SD
served as an alternative to SWE. Because SD
variability is significantly greater than density
variability, the primary requirement for estimating
SWE over an area is an understanding of SD. We
compared the performance of GP against linear
regression (LR), binary regression trees (BT), and a
widely used basic method (BM) that naively assumes
non-variable snowpack. In the first experiment set,
GP and LR models predicted SWE with lower error
than BM. In the second experiment set, GP had lower
error than LR, but outperformed BT only when we
applied a technique for determining training and
testing datasets that specifically mitigated the
possibility of over-fitting.}
}
@inproceedings{ganapathy-jaeger-skalka-tan-law14,
author = {Vinod Ganapathy and Trent Jaeger and Christian Skalka and Gang Tan},
title = {\textcolor{navy}{Assurance for Defense in Depth via Retrofitting}},
booktitle = {Layered Assurance Workshop},
year = 2014,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/ganapathy-jaeger-skalka-tan-law14.pdf},
abstract = {The computer security community has long advocated
\textit{defense in depth}, the concept of building
multiple layers of defense to protect a system.
Unfortunately, it has been difficult to realize this
vision in practice, and software often ships with
inadequate defenses, typically developed in an ad
hoc fashion. Currently, programmers reason about
security manually and lack tools to validate
assurance that security controls provide
satisfactory defenses. In this position paper, we
propose STRATA---a holistic framework for
defense in depth. We examine application of
STRATA in the context of adding security controls
to legacy code for authorization, containment, and
auditing. The STRATA framework aims to support a
combination of: (1) interactive techniques to
develop {\em retrofitting policies} that describe
the connection between program constructs and
security policy and (2) automated techniques to
produce optimal security controls that satisfy
retrofitting policies. We show that by reasoning
about defense in depth a variety of advantages can
be obtained, including optimization, continuous
improvement, and assurance across multiple security
controls. }
}
@article{chong-skalka-vaughan-jdiq14,
author = {Stephen Chong and Christian Skalka and Jeffrey A. Vaughan},
title = {\textcolor{navy}{Self-Identifying Data for Fair Use}},
journal = {ACM Journal of Data and Information Quality},
year = 2014,
volume = 5,
number = 3,
pages = {Article 11},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/chong-skalka-vaughan-jdiq14.pdf},
abstract = { Public-use earth science datasets are a useful resource
with the unfortunate feature that their provenance
is easily disconnected from their
content. ``Fair-use policies'' typically associated
with these datasets require appropriate attribution
of providers by users, but sound and complete
attribution is difficult if provenance information
is lost. To address this we introduce a technique to
directly associate provenance information with
sensor datasets. Our technique is similar to
traditional watermarking but is intended for
application to unstructured time-series datasets.
Our approach is potentially imperceptible given
sufficient margins of error in datasets, and is
robust to a number of benign but likely
transformations including truncation, rounding,
bit-flipping, sampling, and reordering. We provide
algorithms for both one-bit and blind mark checking,
and show how our system can be adapted to various
data representation types. Our algorithms are
probabilistic in nature and are characterized by
both combinatorial and empirical analyses. Mark
embedding can be applied at any point in the data
lifecycle, allowing adaptation of our scheme to
social or scientific concerns. }
}
@article{chapin-skalka-tissec14,
author = {Peter Chapin and Christian Skalka},
title = {\textcolor{navy}{{SpartanRPC}: Remote Procedure Call Authorization in Wireless Sensor Networks}},
journal = {ACM Transactions on Information and System Security},
year = 2014,
volume = 17,
number = 2,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/chapin-skalka-tissec14.pdf},
abstract = {We describe SpartanRPC, a secure middleware technology that supports
cooperation between distinct security domains in wireless sensor
networks. SpartanRPC extends nesC to provide a link-layer remote
procedure call (RPC) mechanism, along with an enhancement of
configuration wirings that allow specification of remote, dynamic
endpoints. RPC invocation is secured via an authorization logic that
enables servers to specify access policies, and requires clients to
prove authorization. This mechanism is implemented using a
combination of symmetric and public key cryptography. We report on
benchmark testing of a prototype implementation, and on an
application of the framework that supports secure collaborative use
and administration of an existing WSN data gathering system.
}
}
@inproceedings{chapin-etal-gpce13,
author = {Peter Chapin and Christian Skalka and Scott Smith and Michael Watson},
title = {\textcolor{navy}{{Scalaness/nesT}: Type Specialized Staged Programming for Sensor Networks}},
year = 2013,
booktitle = {ACM Generic Programming: Concepts and Experiences (GPCE)},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/chapin-etal-gpce13.pdf},
abstract = {Programming wireless embedded networks is challenging due to
severe limitations on processing speed, memory, and bandwidth. Staged
programming can help bridge the gap between high level code
refinement techniques and efficient device level programs by
allowing a first stage program to specialize device level code. Here
we introduce a two stage programming system for wireless sensor
networks. The first stage program is written in our extended dialect
of Scala, called Scalaness, where components written in our type
safe dialect of nesC, called nesT, are composed and specialized.
Scalaness programs can dynamically construct TinyOS-compliant nesT
device images that can be deployed to motes. A key result, called
cross-stage type safety, shows that successful static type checking
of a Scalaness program means no type errors will arise either during
programmatic composition and specialization of WSN code, or later on
the WSN itself. Scalaness has been implemented through direct
modification and plug-in extension of the Scala compiler.
Implementation of a staged public-key cryptography calculation shows
the sensor memory footprint can be significantly reduced by staging.}
}
@inproceedings{skalka-frolik-realwsn13,
author = {Christian Skalka and Jeff Frolik},
title = {\textcolor{navy}{Snowcloud: A Complete System for Snow Hydrology Research}},
year = 2013,
booktitle = {ACM Workshop on Real-World Wireless Sensor Networks (RealWSN)},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-frolik-realwsn13.pdf},
abstract = {Snowcloud is a data gathering system for snow hydrology
field research campaigns conducted in harsh climates
and remote areas. The system combines distributed
wireless sensor network technology and computational
techniques to provide data to researchers at lower
cost and higher spatial resolution than ground-based
systems using traditional ``monolithic''
technologies. Supporting the work of a variety of
collaborators, Snowcloud has seen multiple winter
deployments in settings ranging from high desert to
arctic, resulting in over a dozen node-years of
practical experience. In this paper, we discuss both
the system design and deployment experiences. }
}
@article{liu-skalka-smith-hosc11submitted,
author = {Yu David Liu and Christian Skalka and Scott Smith},
title = {\textcolor{navy}{Type-Specialized Staged Programming with Process Separation}},
journal = {Journal of Higher Order and Symbolic Computation},
year = 2012,
volume = 24,
number = 4,
pages = {341-385},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/liu-skalka-smith-hosc11submitted.pdf},
abstract = {Staging is a powerful language construct that allows a program at
one stage of evaluation to manipulate and specialize a program to be
executed at a later stage. We propose a new staged language
calculus that extends the programmability of staged
languages in two directions.
First, it supports \emph{dynamic type specialization}: types
can be dynamically constructed, abstracted, and passed as arguments,
while preserving decidable typechecking via a System F$_\le$-style
semantics combined with a restricted form of $\lambda_\omega$-style
runtime type construction. With dynamic type specialization the data structure
layout of a program can be optimized via staging.
Second, it works in a context where different stages of
computation are executed in different process spaces, a property we
term \emph{staged process separation}. Programs at different stages
can directly communicate program data via a built-in
serialization discipline.
The language is endowed with a metatheory including type
preservation, type safety, and decidability as demonstrated
constructively by a sound type checking algorithm.
%
While our language design is general, we are particularly interested
in future applications of staging in resource-constrained and
embedded systems: these systems have limited space for code and
data, as well as limited CPU time, and specializing code for the
particular deployment at hand can improve efficiency in all of these
dimensions. The combination of dynamic type specialization and
staging across processes greatly increases the utility of staged
programming in these domains. We illustrate this via wireless
sensor network programming examples.
}
}
@inproceedings{moeser-walker-skalka-frolik-wsc11,
author = {C. David Moeser and Mark Walker and Christian Skalka and Jeff Frolik},
title = {\textcolor{navy}{Application of a wireless sensor network for
distributed snow water equivalence estimation}},
year = 2011,
booktitle = {Western Snow Conference},
docx = {http://ceskalka.w3.uvm.edu/skalka-pubs/moeser-walker-skalka-frolik-wsc11.docx},
abstract = {Snow accumulated in mountainous areas is the
source of water supply for much of the western
United States. Expected amounts of annual
discharge in rivers are based on snow water
equivalence (SWE) measurements and regression
models that have climate stability as an
underlying assumption. Given climate change,
estimates should change from statistical to
physically-based models. The current data
collection network for SWE provides sparse
areal coverage. Inexpensive wireless sensor
networks and simple estimation techniques
could extend the current network at low cost.
We report results of deployment of a prototype
wireless sensor network, Snowcloud, at the
Sagehen Creek, CA experimental field station.
The network reported snow depth and temperature
from January to May 2010. We extrapolated
from an index site to estimate SWE from SD,
based on the assumption of a stable SWE/SD ratio,
and predicted SWE with reasonable accuracy
(average difference of +1.0 cm (0.4 in),
standard deviation = 3.0 cm (1.2 in)).
Regression analysis indicated significant
associations (P<.05) between SWE and \% canopy
closure to the north, weekly total incoming
solar radiation and monthly average temperature.
These results indicate that wireless sensor
networks measuring SD can be used to extend
information from snow measurement sites to give
estimates of water availability in snowpack. }
}
@inproceedings{chapin-skalka-mass10,
author = {Peter Chapin and Christian Skalka},
title = {\textcolor{navy}{SpartanRPC: WSN Middleware for Cooperating Domains}},
year = 2010,
booktitle = {IEEE Conference on Mobile and Ad-Hoc Sensor Systems},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/chapin-skalka-mass10.pdf},
abstract = {
In this paper we describe SpartanRPC, a secure middleware technology for wireless sensor
network (WSN) applications supporting cooperation between distinct protection domains. The
SpartanRPC system extends the nesC programming language to provide a link-layer remote
procedure call (RPC) mechanism, along with an extension of nesC configuration wirings that
allow specification of remote, dynamic endpoints. SpartanRPC also incorporates a
capability-based security architecture for protection of RPC resources in a heterogeneous
trust environment, via language-level policy specification and enforcement. We discuss an
implementation of SpartanRPC based on program transformation and AES cryptography, and present
empirical performance results.
}
}
@inproceedings{chong-skalka-vaughan-ipsn10,
author = {Stephen Chong and Christian Skalka and Jeffrey A. Vaughan},
title = {\textcolor{navy}{Self-Identifying Sensor Data}},
year = 2010,
booktitle = {ACM Conference on Information Processing in Sensor Networks (IPSN)},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/chong-skalka-vaughan-ipsn10.pdf},
abstract = {Public-use sensor datasets are a useful scientific
resource with the unfortunate feature that their
provenance is easily disconnected from their content.
To address this we introduce a technique to directly
associate provenance information with sensor datasets.
Our technique is similar to traditional watermarking but
is intended for application to unstructured time-series
data. Our approach is potentially imperceptible given
sufficient margins of error in datasets, and is robust
to a number of benign but likely transformations
including truncation, rounding, bit-flipping, sampling,
and reordering. We provide algorithms for both one-bit
and blind mark checking. Our algorithms are
probabilistic in nature and are characterized by a
combinatorial analysis.}
}
@inproceedings{liu-skalka-smith-wgp09,
author = {Yu David Liu and Christian Skalka and Scott Smith},
title = {\textcolor{navy}{Type-Specialized Staged Programming with Process Separation}},
booktitle = {Workshop on Generic Programming (WGP09)},
year = 2009,
address = {Edinburgh, Scotland},
note = {\emph{Note:} this version contains minor revisions to the article published
in WGP09 Proceedings. },
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/liu-skalka-smith-wgp09.pdf},
abstract = {Staging is a powerful language construct that allows a
program at one stage to manipulate and specialize a
program at the next. We propose a new
staged calculus designed with novel features for staged
programming in modern computing platforms such as
embedded systems. A distinguishing feature
is a model of process separation, whereby
different stages of computation are executed in
different process spaces. Our language also supports
dynamic type specialization via type abstraction,
dynamic type construction, and a limited form of type
dependence. The calculus is endowed with a largely
standard metatheory, including type preservation and
type safety results. We discuss the utility of our
language via code examples from the domain of wireless
sensor network programming. }
}
@techreport{makarov-skalka-techreport09,
author = {Evgeny Makarov and Christian Skalka},
title = {\textcolor{navy}{Formalized Proof of Type Safety of Hoare Type Theory}},
institution = {The University of Vermont},
year = 2009,
number = {CS-09-01},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/makarov-skalka-tr09.pdf },
abstract = {We prove type safety of the Hoare Type Theory (HTT), an
extension of Separation Logic and Hoare Logic to
higher-order functional programs. Our proof is
rather simple and is based on subject reduction,
unlike previous work on HTT by Birkedal et al.,
which uses nontrivial denotational
semantics. Further, we formalized our proof in the
Coq theorem prover. This formalization can be seen
as a basis of a platform for verifying higher-order
imperative programs that is alternative to Ynot,
another implementation of HTT in Coq. }
}
@inproceedings{skalka-etal-issw08,
author = {Christian Skalka and Jeff Frolik and Beverley Wemple},
title = {\textcolor{navy}{A Distributed In Situ System for Snow Water Equivalence Measurement}},
booktitle = {International Snow Science Workshop},
year = 2008,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-etal-issw08.pdf},
abstract = {We describe a ground-based system that provides quasi
real-time measurement and collection of snow-water
equivalent (SWE) data in remote settings. The system is
significantly cheaper and easier to deploy than current
methods and is more robust to terrain and snow bridging
effects. The system also enjoys several possible remote
data recovery solutions. Compared to current
infrastructure using existing technology, our system
features will combine to allow more sites to be installed
for the same cost and effort, in a greater variety of
terrain, enabling data collection at improved spatial
resolutions. The system integrates a new computational
architecture with new sensor technologies. Our
computational architecture is based on wireless sensor
networks, comprised of programmable, low-cost,
low-powered nodes capable of sophisticated sensor control
and remote data communication. Our sensor technology
works by measuring attenuation of electromagnetic
radiation, an approach that is immune to snow bridging
and significantly reduces sensor footprints.}
}
@article{skalka-hosc08,
author = {Christian Skalka},
title = {\textcolor{navy}{Types and Trace Effects for Object Orientation}},
year = {2008},
journal = {Journal of Higher Order and Symbolic Computation},
volume = 21,
number = 3,
pages = {239-282},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-hosc08.pdf},
abstract = {Trace effects are statically generated program
abstractions that can be model checked for verification
of assertions in a temporal program logic. In this
paper we develop a type and effect analysis for
obtaining trace effects of Object Oriented programs in
Featherweight Java. We observe that the analysis is
significantly complicated by the interaction of trace
behavior with inheritance and other Object Oriented
features, particularly overridden methods, dynamic
dispatch, and downcasting. We propose an expressive
type and effect inference algorithm combining
polymorphism and subtyping/subeffecting constraints to
obtain a flexible trace effect analysis in this setting,
and show how these techniques are applicable to Object
Oriented features. We also extend the basic language
model with exceptions and stack-based event contexts,
and show how trace effects scale to these extensions by
structural transformations.},
note = {Extended version of \cite{skalka-ppdp05}.}
}
@article{chapin-skalka-wang-acmcs07,
author = {Peter Chapin and Christian Skalka and X. Sean Wang},
title = {\textcolor{navy}{Authorization in Trust Management: Features and Foundations}},
volume = 40,
number = 3,
pages = {1-48},
year = {2008},
journal = {ACM Computing Surveys},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/chapin-skalka-wang-acmcs07.pdf},
abstract = {Trust management systems are frameworks for
authorization in modern distributed systems, allowing
remotely accessible resources to be protected by
providers. By allowing providers to specify policy, and
access requesters to possess certain access rights,
trust management automates the process of determining
whether access should be allowed on the basis of policy,
rights, and an authorization semantics. In this paper
we survey the modern state of the art in trust management,
focusing on features of policy and rights languages that
provide the necessary expressiveness for modern
practice. We characterize systems in light of a generic
structure that takes into account components of
practical implementations. We emphasize systems that
have a formal foundation, since their security properties
can be rigorously guaranteed. Underlying
formalisms are reviewed to provide necessary
background.}
}
@article{skalka-smith-vanhorn-jfp08,
author = {Christian Skalka and Scott Smith and David Van Horn},
title = {\textcolor{navy}{Types and Trace Effects of Higher Order
Programs}},
year = {2008},
volume = 18,
number = 2,
pages = {179-249},
journal = {Journal of Functional Programming},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-smith-vanhorn-jfp07.pdf},
abstract = {This paper shows how type effect systems can be combined
with model-checking techniques to produce powerful,
automatically verifiable program logics for higher order
programs. The properties verified are based on the
ordered sequence of events that occur during program
execution, so-called \emph{event traces}. Our type and
effect systems infer conservative approximations of the
event traces arising at run-time, and model-checking
techniques are used to verify logical properties of
these histories. Our language model is based on the
$\lambda$-calculus. Technical results include a type
inference algorithm for a polymorphic type effect
system, and a method for applying known model-checking
techniques to the \emph{trace effects} inferred by the
type inference algorithm, allowing static enforcement of
history- and stack-based security mechanisms. A type
safety result is proven for both unification and
subtyping constraint versions of the type system,
ensuring that statically well-typed programs do not
contain trace event checks that can fail at run-time.},
note = {Extended version of \cite{skalka-smith-aplas04}.}
}
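
To make "event traces" concrete, here is a toy OCaml interpreter (ours,
not the paper's formal semantics) that records the history of events a
program emits as it runs; the paper's type and effect system infers a
conservative approximation of all such histories statically:

  type expr =
    | Unit
    | Event of string * expr   (* emit an event, then continue *)
    | Seq of expr * expr       (* run two expressions in order *)

  (* eval threads the history accumulated so far through evaluation. *)
  let rec eval (trace : string list) (e : expr) : string list =
    match e with
    | Unit -> trace
    | Event (ev, k) -> eval (trace @ [ev]) k
    | Seq (e1, e2) -> eval (eval trace e1) e2

  (* Evaluates to ["open"; "write"; "close"]. *)
  let history =
    eval [] (Seq (Event ("open", Unit),
                  Seq (Event ("write", Unit), Event ("close", Unit))))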
@inproceedings{shroff-smith-skalka-aplas07,
author = {Paritosh Shroff and Christian Skalka and Scott Smith},
title = {\textcolor{navy}{The Nuggetizer: Abstracting Away Higher Orderness for Program Verification}},
year = {2007},
booktitle = {Proceedings of the Asian Programming Languages Symposium},
pdf = {http://www.cs.jhu.edu/~scott/pll/papers/nuggetizer-aplas07.pdf},
abstract = {We develop a static analysis which distills first-order
computational structure from higher-order functional
programs. The analysis condenses higher-order programs
into a first-order rule-based system, a \emph{nugget},
that characterizes all value bindings that may arise
from program execution. Theorem provers are limited in
their ability to automatically reason about higher-order
programs; nuggets address this problem, being
inductively defined structures that can be simply and
directly encoded in a theorem prover. The theorem prover
can then prove non-trivial program properties, such as
the range of values assignable to particular variables
at runtime. Our analysis is flow- and path-sensitive,
and incorporates a novel \emph{prune-rerun} analysis
technique to approximate higher-order recursive
computations.}
}
@inproceedings{skalka-ppdp07,
author = {Christian Skalka},
title = {\textcolor{navy}{Type Safe Dynamic Linking for {JVM} Access Control}},
year = {2007},
booktitle = {Proceedings of the ACM Symposium on Principles and Practice
of Declarative Programming},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-ppdp07.pdf},
abstract = {The Java JDK security model provides an access
control mechanism for the JVM based on dynamic
stack inspection. Previous results have
shown how stack inspection can be enforced at
compile time via whole-program type analysis,
but features of the JVM present significant
remaining technical challenges. For instance,
dynamic dispatch at the bytecode level requires
special consideration to ensure flexibility
in typing. Even more problematic is dynamic
class loading and linking, which disallow a
purely static analysis in principle, though
the intended applications of the JDK exploit
these features. We propose an extension to
existing bytecode verification, that enforces
stack inspection at link time, without imposing
new restrictions on the JVM class loading
and linking mechanism. Our solution is more
flexible than existing type based approaches,
and establishes a formal type safety result for
bytecode-level access control in the presence
of dynamic class linking.}
}
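
For readers unfamiliar with stack inspection, a minimal OCaml model of
the dynamic check that this line of work aims to discharge at link or
compile time (a simplification of the JDK semantics; field names are
ours):

  type frame = { granted : string list; privileged : bool }

  (* A check succeeds if every frame from the top of the stack down is
     granted the permission, stopping early at a frame that explicitly
     asserted its privilege (doPrivileged-style). *)
  let rec check_permission (perm : string) (stack : frame list) : bool =
    match stack with
    | [] -> true
    | f :: rest ->
        List.mem perm f.granted
        && (f.privileged || check_permission perm rest)

  (* true: the privileged middle frame shields the untrusted base frame. *)
  let _ =
    check_permission "fileRead"
      [ { granted = ["fileRead"]; privileged = false };
        { granted = ["fileRead"]; privileged = true };
        { granted = []; privileged = false } ]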
@article{skalka-wang-chapin-jcs07,
author = {Christian Skalka and X. Sean Wang and Peter Chapin},
title = {\textcolor{navy}{Risk Management for Distributed Authorization}},
year = {2007},
volume = 15,
number = 4,
pages = {447-489},
journal = {Journal of Computer Security},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-wang-chapin-jcs07.pdf},
abstract = {Distributed authorization takes into account several
elements, including certificates that may be provided by
non-local actors. While most trust management systems
treat all assertions as equally valid up to certificate
authentication, realistic considerations may associate
risk with some of these elements, for example some
actors may be less trusted than others. Furthermore,
practical online authorization may require certain
levels of risk to be tolerated. In this paper, we
introduce a trust management logic that incorporates
formal risk assessment. This formalization allows risk
levels to be associated with authorization elements, and
promotes development of a distributed authorization
algorithm allowing tolerable levels of risk to be
precisely specified and rigorously enforced.},
note = {Extended version of \cite{chapin-skalka-wang-fmse05}.}
}
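
One simple instance of the idea, with an assumed additive combination of
risks and a caller-chosen tolerance threshold (the paper's logic is more
general than this sketch):

  (* Authorize only if the total risk of all elements used in the
     derivation stays within the stated tolerance. *)
  let authorize ~(threshold : float) (risks : float list) : bool =
    List.fold_left ( +. ) 0.0 risks <= threshold

  let _ = authorize ~threshold:0.5 [0.1; 0.2; 0.1]  (* true *)
  let _ = authorize ~threshold:0.2 [0.1; 0.2]       (* false *)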
@inproceedings{polakow-skalka-plas06,
author = {Jeff Polakow and Christian Skalka},
title = {\textcolor{navy}{Specifying Distributed Trust Management in {LolliMon}}},
booktitle = {Proceedings of the ACM Workshop on Programming Languages and Analysis for Security},
year = {2006},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/polakow-skalka-plas06.pdf},
ps = {http://ceskalka.w3.uvm.edu/skalka-pubs/polakow-skalka-plas06.ps},
abstract = {We propose the monadic linear logic programming language
LolliMon as a new foundation for the specification of
distributed trust management systems, particularly the
RT framework. LolliMon possesses features that make
it well-suited to this application, including rigorous
logical foundations, an expressive formula language,
strong typing, and saturation as a proof resolution
strategy. We specify certificate chain discovery in
full RT for authorization in a distributed
environment where certificates may be stored non-locally
and selective retrieval is necessary. The uniform
LolliMon specification of authorization and certificate
chain discovery eases formal reasoning about the system,
and scales to a rich collection of trust management
features. The executable LolliMon specification also
serves as a prototype implementation.}
}
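
For orientation, the RT0 fragment of the RT framework can be rendered as
a small OCaml datatype (the representation is ours); LolliMon's
saturating proof search computes role memberships as a fixed point over
such credentials:

  type entity = string
  type role = entity * string                (* A.r *)

  type credential =
    | Member of role * entity                (* A.r <- D          *)
    | Inclusion of role * role               (* A.r <- B.s        *)
    | Linked of role * role * string         (* A.r <- B.s.t      *)
    | Intersect of role * role * role        (* A.r <- B.s ∩ C.t *)

  (* Alice is a doctor at HospitalA; the insurer's "preferred" role
     includes all of HospitalA's doctors. Saturation derives that Alice
     is preferred. *)
  let creds : credential list =
    [ Member (("HospitalA", "doctor"), "Alice");
      Inclusion (("Insurer", "preferred"), ("HospitalA", "doctor")) ]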
@article{skalka-wang-csse06,
author = {Christian Skalka and X. Sean Wang},
title = {\textcolor{navy}{Trust but Verify: Authorization for Web Services}},
journal = {Journal of Computer Systems Science and Engineering},
year = {2006},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-wang-csse06.pdf},
abstract = {Through web service technology, distributed applications can be
built in an open and flexible manner, bringing tremendous power to
applications on the web. However, this flexibility poses
significant challenges to security. Traditional access control for
distributed systems is not flexible and efficient enough in such an
environment; in particular, fully secure online authorization
decisions may be too inefficient in practice, requiring
simplifications which may have only an informal and unverifiable
relation to fully secure authorization decisions.
This paper introduces a trust-but-verify framework for web services
authorization. In this framework, each web service
maintains the usual access control policies, as well as a ``trust
transformation'' policy, that formally specifies how to simplify full
authorization into a more efficient form for online checking. This
formalization allows offline checking to verify
the trust relation between full security and online checking.}
}
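
A schematic of the trust-but-verify control flow, under assumed names
for its two halves (a sketch of the framework's shape, not an
implementation from the paper):

  type decision = Allow | Deny

  (* The online path runs a cheap, transformed check immediately; the
     evidence justifying the transformation is recorded for later
     offline verification against the full policy. *)
  let trust_but_verify
      ~(online_check : 'req -> bool)
      ~(record_evidence : 'req -> unit)
      (request : 'req) : decision =
    if online_check request then begin
      record_evidence request;
      Allow
    end else Deny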
@inproceedings{chapin-skalka-wang-fmse05,
author = {Peter Chapin and Christian Skalka and X. Sean Wang},
title = {\textcolor{navy}{Risk Assessment in Distributed Authorization}},
booktitle = {Proceedings of the ACM Workshop on Formal Methods in Security Engineering},
year = 2005,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/chapin-skalka-wang-fmse05.pdf},
ps = {http://ceskalka.w3.uvm.edu/skalka-pubs/chapin-skalka-wang-fmse05.ps},
abstract = {Distributed authorization takes into account several
elements, including certificates that may be
provided by non-local actors. While most trust
management systems treat all assertions as equally
valid up to certificate authentication, realistic
considerations may associate risk with some of these
elements; some actors may be less trusted than
others, some elements may be more computationally
expensive to obtain, and so forth. Furthermore,
practical online authorization may require certain
levels of risk to be tolerated. In this paper, we
introduce a trust management logic that incorporates
formal risk assessment. This formalization allows
risk levels to be associated with authorization
elements, and promotes development of a distributed
authorization algorithm allowing tolerable levels of
risk to be precisely specified and rigorously
enforced.}
}
@inproceedings{skalka-ppdp05,
author = {Christian Skalka},
title = {\textcolor{navy}{Trace Effects and Object Orientation}},
booktitle = {Proceedings of the ACM Conference on Principles and Practice of Declarative Programming},
year = 2005,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-ppdp05.pdf},
abstract = {\emph{Trace effects} are statically generated
program abstractions that can be model checked for
verification of assertions in a temporal program
logic. In this paper we develop a type and effect
analysis for obtaining trace effects of Object
Oriented programs in Featherweight Java. We observe
that the analysis is significantly complicated by
the interaction of trace behavior with inheritance
and other Object Oriented features, particularly
overridden methods, dynamic dispatch, and
downcasting. We propose an expressive type and
effect inference algorithm combining polymorphism
and subtyping/subeffecting constraints to obtain a
flexible trace effect analysis in this setting, and
show how these techniques are applicable to Object
Oriented features.}
}
@inproceedings{skalka-smith-vanhorn-aiool05,
author = {Christian Skalka and Scott Smith and David Van Horn},
title = {\textcolor{navy}{A Type and Effect System for Flexible Abstract Interpretation of Java}},
booktitle = {Proceedings of the ACM Workshop on Abstract Interpretation of Object Oriented Languages},
year = 2005,
series = {Electronic Notes in Theoretical Computer Science},
month = {January},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-smith-vanhorn-aiool05.pdf},
abstract = {This paper describes a flexible type and effect
inference system for Featherweight Java (FJ). The
effect terms generated by static type and effect
inference embody the abstract interpretation of
program event sequences. Flexibility in the
analysis is obtained by post-processing of inferred
effects, allowing a modular adaptation to extensions
of the language. Several example transformations
are discussed, including how inferred effects can be
transformed to reflect the impact of exceptions on
FJ control flow.}
}
@article{pottier-skalka-smith-toplas05,
author = {François Pottier and Christian Skalka and Scott Smith},
title = {\textcolor{navy}{A Systematic Approach to Static Access Control}},
journal = {ACM Transactions on Programming Languages and Systems},
year = {2005},
volume = 27,
number = 2,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/fpottier-skalka-smith-toplas03.pdf},
ps = {http://ceskalka.w3.uvm.edu/skalka-pubs/fpottier-skalka-smith-toplas03.ps},
abstract = {The Java Security Architecture includes a dynamic mechanism for
enforcing access control checks, the so-called
\emph{stack inspection} process. While the architecture
has several appealing features, access control checks
are all implemented via dynamic method calls. This is a
highly non-declarative form of specification which is
hard to read, and which leads to additional run-time
overhead. This paper develops type systems which can
statically guarantee the success of these checks. Our
systems allow security properties of programs to be
clearly expressed within the types themselves, which
thus serve as static declarations of the security
policy. We develop these systems using a systematic
methodology: we show that the security-passing style
translation, proposed by Wallach, Appel and Felten as a
\emph{dynamic} implementation technique, also gives rise
to \emph{static} security-aware type systems, by
composition with conventional type systems. To define
the latter, we use the general HM$(X)$ framework, and
easily construct several constraint- and
unification-based type systems.}
}
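
The security-passing style translation at the heart of this methodology
can be sketched in OCaml: permissions become an explicit argument, so
the dynamic stack walk becomes ordinary data flow that a conventional
type system can track (a simplification; the full translation also
intersects permissions at trust boundaries):

  module Perms = Set.Make (String)

  (* This check replaces dynamic stack inspection: it just consults the
     permission set threaded through the call chain. *)
  let check p perms =
    if Perms.mem p perms then () else failwith ("security: " ^ p)

  let read_file (perms : Perms.t) (path : string) : string =
    check "fileRead" perms;
    "contents of " ^ path

  (* Code statically granted fileRead enables it explicitly, modelling
     doPrivileged-style amplification. *)
  let trusted_caller (perms : Perms.t) =
    read_file (Perms.add "fileRead" perms) "/etc/motd"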
@article{skalka-smith-ijis05,
author = {Christian Skalka and Scott Smith},
title = {\textcolor{navy}{Static Use-Based Object Confinement}},
journal = {Springer International Journal of Information Security},
year = {2005},
volume = {4},
number = {1-2},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-smith-ijis05.pdf},
abstract = {The confinement of object references is a significant
security concern for modern programming languages. We
define a language that serves as a uniform model for a
variety of confined object reference systems. A
\emph{use-based} approach to confinement is adopted,
which we argue is more expressive than previous
communication-based approaches. We then develop a
readable, expressive type system for static analysis of
the language, along with a type safety result
demonstrating that run-time checks can be eliminated.
The language and type system thus serve as a reliable,
declarative and efficient foundation for secure
capability-based programming and object confinement.}
}
@inproceedings{skalka-wang-sws04,
author = {Christian Skalka and X. Sean Wang},
title = {\textcolor{navy}{Trust but Verify: Authorization for Web Services}},
booktitle = {ACM Workshop on Secure Web Services},
year = 2004,
month = {October},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-wang-sws04.pdf},
abstract = {Through web service technology, distributed applications
can be built in a flexible manner, bringing tremendous
power to applications on the web. However, this
flexibility poses significant challenges to security. In
particular, an end user (be it human or machine) may
access a web service directly, or through a number of
intermediaries, while these intermediaries may be formed
on the fly for a particular task. Traditional access
control for distributed systems is not flexible and
efficient enough in such an environment. Indeed, it may
be impossible for a web service to anticipate all
possible access patterns, hence to define an appropriate
access control list beforehand. Novel solutions are
needed.
This paper introduces a trust-but-verify framework for
web services authorization, and provides an
implementation example. In the trust-but-verify
framework, each web service maintains authorization
policies. In addition, there is a global set of ``trust
transformation'' rules, each of which has an associated
transformation condition. These trust transformation
rules convert complicated access patterns into simpler
ones, and the transformation is done by a requester (the
original requester or an intermediary) with the
assumption that the requester can be trusted to
correctly omit certain details. To verify
authorization, the requester is required to document
evidence that the associated transformation conditions
are satisfied. Such evidence and support information can
either be checked before access is granted, or can be
verified after the fact in an offline mode, possibly by
an independent third party.}
}
@inproceedings{skalka-smith-aplas04,
author = {Christian Skalka and Scott Smith},
title = {\textcolor{navy}{History Effects and Verification}},
booktitle = {Asian Programming Languages Symposium},
month = {November},
year = 2004,
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-smith-aplas04.pdf},
abstract = {This paper shows how type effect systems can be combined with
model-checking techniques to produce powerful,
automatically verifiable program logics for higher-order
programs. The properties verified are based on the
ordered sequence of events that occur during program
execution---an \emph{event history}. Our type and
effect systems automatically infer conservative
approximations of the event histories arising at
run-time, and model-checking techniques are used to
verify logical properties of these histories.
Our language model is based on the $\lambda$-calculus.
Technical results include a powerful type inference
algorithm for a polymorphic type effect system, and a
method for applying known model-checking techniques to
the \emph{history effects} inferred by the type
inference algorithm, allowing static enforcement of
history- and stack-based security mechanisms.}
}
@article{skalka-smith-entcs03,
author = {Christian Skalka and Scott Smith},
title = {\textcolor{navy}{Set Types and Applications}},
year = {2003},
ps = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-smith-entcs03.ps},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-smith-entcs03.pdf},
journal = {Electronic Notes in Theoretical Computer Science},
volume = {75},
abstract = {We present $\mathrm{pml}_B$, a programming language that includes
primitive sets and associated operations. The language
is equipped with a precise type discipline that
statically captures dynamic properties of sets, allowing
runtime optimizations. We demonstrate the utility of
$\mathrm{pml}_B$ by showing how it can serve as a practical
\emph{implementation} language for higher-level
programming language based security systems, and
characterize $\mathrm{pml}_B$ by comparing the expressiveness of
$\mathrm{pml}_B$ sets with enumerations.}
}
@article{skalka-pottier-entcs03,
author = {Christian Skalka and François Pottier},
title = {\textcolor{navy}{Syntactic Type Soundness for HM$(X)$}},
year = {2003},
ps = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-pottier-entcs03.ps},
pdf = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-pottier-entcs03.pdf},
journal = {Electronic Notes in Theoretical Computer Science},
volume = {75},
abstract = {The HM$(X)$ framework is a constraint-based type framework with built-in
let-polymorphism. This paper establishes purely syntactic type
soundness for the framework, treating an extended version of the
language containing state and recursive binding. These results
demonstrate that any instance of HM$(X)$, comprising a
specialized constraint system and possibly additional functional
constants and their types, enjoys syntactic type soundness.}
}
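
Schematically, syntactic soundness for HM$(X)$-style judgements
$C, \Gamma \vdash e : \tau$ takes the usual preservation-plus-progress
form (our paraphrase, not the paper's exact statements):

  \[
  \begin{array}{ll}
  \textbf{Preservation:} & \text{if } C, \Gamma \vdash e : \tau
    \text{ and } e \longrightarrow e',
    \text{ then } C, \Gamma \vdash e' : \tau. \\
  \textbf{Progress:} & \text{if } C, \Gamma \vdash e : \tau
    \text{ with } C \text{ satisfiable, then } e \text{ is a value or }
    e \longrightarrow e'.
  \end{array}
  \]

Together these imply that well-typed programs do not go wrong, for any
constraint system instantiating the framework.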
@inproceedings{skalka-smith-fcs02,
author = {Christian Skalka and Scott Smith},
title = {\textcolor{navy}{Static Use-Based Object Confinement}},
booktitle = {Proceedings of the Foundations of Computer Security Workshop (FCS
'02)},
month = {July},
year = {2002},
address = {Copenhagen, Denmark},
ps = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-smith-fcs02.ps},
abstract = {The confinement of object references is a significant
security concern for modern programming languages. We
define a language that serves as a uniform model for a
variety of confined object reference systems. A
\emph{use-based} approach to confinement is adopted,
which we argue is more expressive than previous
communication-based approaches. We then develop a
readable, expressive type system for static analysis of
the language, along with a type safety result
demonstrating that run-time checks can be eliminated.
The language and type system thus serve as a reliable,
declarative and efficient foundation for secure
capability-based programming and object confinement.}
}
@inproceedings{pottier-skalka-smith-esop01,
author = {François Pottier and Christian Skalka and Scott Smith},
title = {\textcolor{navy}{A Systematic Approach to Static Access Control}},
booktitle = {Proceedings of the 10th European Symposium on
Programming (ESOP'01)},
editor = {David Sands},
publisher = {Springer Verlag},
series = {Lecture Notes in Computer Science},
volume = {2028},
pages = {30--45},
month = apr,
year = {2001},
ps = {http://ceskalka.w3.uvm.edu/skalka-pubs/fpottier-skalka-smith-esop01.ps},
springer = {http://link.springer.de/link/service/series/0558/tocs/t2028.htm},
abstract = {The Java JDK 1.2 Security Architecture includes a
dynamic mechanism for enforcing access control
checks, so-called \emph{stack inspection}. This
paper studies type systems which can statically
guarantee the success of these checks. We develop
these systems using a new, systematic methodology:
we show that the security-passing style translation,
proposed by Wallach and Felten as a \emph{dynamic}
implementation technique, also gives rise to
\emph{static} security-aware type systems, by
composition with conventional type systems. To
define the latter, we use the general HM$(X)$
framework, and easily construct several constraint-
and unification-based type systems. They offer
significant improvements on a previous type system
for JDK access control, both in terms of
expressiveness and in terms of readability of type
specifications.}
}
@inproceedings{skalka-smith-icfp00,
author = {Christian Skalka and Scott Smith},
title = {\textcolor{navy}{Static Enforcement of Security with Types}},
booktitle = {Proceedings of the Fifth {ACM} {SIGPLAN}
International Conference on Functional Programming
(ICFP'00)},
month = sep,
year = {2000},
pages = {34--45},
address = {Montréal, Canada},
ps = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-smith-icfp00.ps},
abstract = {A number of security systems for programming languages
have recently appeared, including systems for enforcing
some form of \emph{access control}. The Java JDK 1.2
security architecture is one such system that is widely
studied and used. While the architecture has many
appealing features, access control checks are all
implemented via dynamic method calls. This is a highly
non-declarative form of specification which is hard to
read, and which leads to additional run-time overhead.
In this paper, we present a novel \textit{security type
system} that enforces the same security guarantees as
Java Stack Inspection, but via a static type system
with no additional run-time checks. The system allows
security properties of programs to be clearly expressed
within the types themselves. We also define and prove
correct an inference algorithm for security types,
meaning that the system has the potential to be layered
on top of the existing Java architecture, without
requiring new syntax.}
}
@techreport{skalka-smith-specdiag02,
author = {Christian Skalka and Scott Smith},
title = {\textcolor{navy}{Diagrammatic Verification of Security Protocols}},
institution = {The Johns Hopkins University},
year = 2002,
ps = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-smith-specdiag02.ps},
abstract = {The Actor Specification Diagram Language is a rigorously
defined graphical specification language, with features
for modelling concurrency and asynchronous
communication. These features make the language
appropriate for the formal analysis of distributed
security protocols. The graphical nature of the
language makes protocol descriptions easy to read and
understand, vital properties for appeal to a wider user
population. Proofs of protocol correctness can be given
via graphical transformation, with each step of
transformation justified by rigorous argument if
desired. To illustrate the utility of a graphical
specification language we analyze the well-known
Needham-Schroeder Public Key protocol.}
}
@phdthesis{skalka-phd-thesis,
author = {Christian Skalka},
title = {\textcolor{navy}{Types for Programming Language-Based Security}},
school = {The Johns Hopkins University},
year = {2002},
month = aug,
ps = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-phd-thesis.ps},
abstract = {Programming language-based security provides
applications programmers with greater control and
flexibility for specifying and enforcing security
policies. As computing environments distribute and
diversify, and demands for program mobility increase,
this power allows programmers to adapt software to
developing needs, while still protecting resources.
This thesis advocates the use of static type
disciplines for expressing and enforcing programming
language-based security. We develop type systems for
two popular security models: the \emph{Access Control}
model with \emph{Stack Inspection}, and the
\emph{Object Confinement} model. Type safety proofs
demonstrate that these type systems reliably enforce
security statically, and imply that certain run-time
optimizations may be effected for programs. The
declarative nature of our type terms also provides
programmers with useful and understandable descriptions
of security properties. To formally develop these type
systems, a transformational approach is used, where
source languages are embedded in a target language,
containing sets of atomic elements and associated
operations, which is pre-equipped with a sound type
system. This target language and type system are
developed using the type constraint framework HM$(X)$.
The transformational approach and HM$(X)$ both allow the
re-use of existing theory and implementations, easing
proof effort, inspiring design, and providing greater
confidence in the correctness of results.}
}
@mastersthesis{skalka-ms-thesis,
author = {Christian Skalka},
title = {\textcolor{navy}{Some Decision Problems for {ML} Refinement Types}},
school = {Carnegie Mellon University},
year = 1997,
ps = {http://ceskalka.w3.uvm.edu/skalka-pubs/skalka-ms-thesis.ps},
abstract = {The ML \texttt{datasort} declaration is an extension to
Standard ML allowing the declaration of \emph{refinement}
types, which correspond to subsets of pre-existing ML
datatypes. Refinement types provide the programmer with
a more precise method of expressing data structure, can
prevent the occurrence of certain run-time exceptions,
and assist in the expression and maintenance of some
representation invariants. ML refinement types
correspond to regular term and regular tree
languages, except that refinement types are polymorphic. While
algorithms have been defined which determine the
emptiness, intersection and subset problems for regular
terms, the corresponding problems for polymorphic
regular terms have not been studied. This thesis
formally describes a refinement type language, and
defines algorithms relevant to these decision problems,
which are proven correct. Along with theoretical
interest, these algorithms have practical applications
for refinement type checking.}
}
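
Since OCaml lacks SML-style datasort declarations, a rough analogy of
the refinement idea is to carve the non-empty lists out of ordinary
lists with a separate type (our illustration, not the thesis's
formalism):

  (* A value of type 'a nonempty is a list refinement guaranteed to have
     at least one element, so hd is total and raises no exception. *)
  type 'a nonempty = Cons of 'a * 'a list

  let hd (Cons (x, _)) = x

  let to_list (Cons (x, xs)) = x :: xs

  (* Checking the refinement happens once, at the boundary. *)
  let of_list = function
    | [] -> None
    | x :: xs -> Some (Cons (x, xs))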