@INPROCEEDINGS{Hedin:Sands:CSFW06,
TITLE = {Noninterference in the presence of non-opaque pointers},
AUTHOR = {D. Hedin and D. Sands},
BOOKTITLE = {Proceedings of the 19th IEEE Computer Security
Foundations Workshop},
YEAR = {2006},
PUBLISHER = {IEEE Computer Society Press},
XPAGES = {255--269},
ABSTRACT = { A common theoretical assumption in the study of information flow
security in Java-like languages is that pointers are opaque -- i.e.,
that the only properties that can be observed of pointers are the
objects to which they point, and (at most) their equality. These
assumptions often fail in practice. For example, various important
operations in Java's standard API, such as hashcodes or
serialization, might break pointer opacity. As a result,
information-flow static analyses which assume pointer opacity risk
being unsound in practice, since the pointer representation provides
an unchecked implicit leak. We investigate information flow in the
presence of non-opaque pointers for an imperative language with
records, pointer instructions and exceptions, and develop an
information flow aware type system which guarantees noninterference.
},
PDF = {http://www.cs.chalmers.se/~dave/papers/Hedin-Sands-CSFW06.pdf}
}
@TECHREPORT{Broberg:Sands:flowlocks,
AUTHOR = {N. Broberg and D. Sands},
TITLE = {Flow Locks: Towards a core calculus for Dynamic Flow Policies},
INSTITUTION = {Chalmers University of Technology and G{\"o}teborg University},
YEAR = {2006},
PDF = {http://www.cs.chalmers.se/~dave/papers/Broberg-Sands-flowlocks-full.pdf},
MONTH = {May},
NOTE = { Draft. Extended version of \cite{Broberg:Sands:ESOP06}},
OPTANNOTE = {}
}
@INPROCEEDINGS{Hunt:Sands:POPL06,
AUTHOR = {S. Hunt and D. Sands},
TITLE = {On Flow-Sensitive Security Types},
BOOKTITLE = {POPL'06, Proceedings of the 33rd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages},
YEAR = 2006,
ABSTRACT = { This article investigates formal properties of a family of
semantically sound flow-sensitive type systems for tracking
information flow in simple While programs. The family is indexed by
the choice of flow lattice.
By choosing the flow lattice to be the powerset of program variables,
we obtain a system which, in a very strong sense, subsumes all other
systems in the family (in particular, for each program, it provides a
principal typing from which all others may be inferred). This
distinguished system is shown to be equivalent to, though more simply
described than, Amtoft and Banerjee's Hoare-style independence logic
(SAS'04).
In general, some lattices are more expressive than others.
Despite this, we show that no type system in the family can give
better results for a given choice of lattice than the type system
for that lattice itself.
Finally, for any program typeable in one of these systems, we show
how to construct an equivalent program which is typeable in a simple
flow-insensitive system. We argue that this general approach could
be useful in a proof-carrying-code setting.},
PDF = {http://www.cs.chalmers.se/~dave/papers/Hunt-Sands-POPL06.pdf},
MONTH = {January}
}
@INPROCEEDINGS{Broberg:Sands:ESOP06,
AUTHOR = {N. Broberg and D. Sands},
TITLE = {Flow Locks: Towards a core calculus for Dynamic Flow Policies},
BOOKTITLE = {Programming Languages and Systems. 15th European Symposium on Programming, ESOP 2006},
YEAR = 2006,
VOLUME = {3924},
SERIES = {LNCS},
ABSTRACT = {Security is rarely a static notion. What is considered to be
confidential or untrusted data varies over time according to
changing events and states. The static verification of secure
information flow has been a popular theme in recent programming
language research, but information flow policies considered are
based on multilevel security which presents a static view of
security levels. In this paper we introduce a very simple mechanism
for specifying dynamic information flow policies, flow locks, which
specify conditions under which data may be read by a certain
actor. The interface between the policy and the code is via
instructions which open and close flow locks. We present a type and
effect system for an ML-like language with references which permits
the completely static verification of flow lock policies, and prove
that the system satisfies a semantic security property generalising
noninterference. We show that this simple mechanism can represent a
number of recently proposed information flow paradigms for
declassification.},
PDF = {http://www.cs.chalmers.se/~dave/papers/Broberg-Sands-ESOP06.pdf},
PUBLISHER = {Springer-Verlag}
}
@BOOK{Axelsson:Sands:Understanding,
AUTHOR = {S. Axelsson and D. Sands},
ALTEDITOR = {},
TITLE = {Understanding {I}ntrusion {D}etection through {V}isualization},
PUBLISHER = {Springer},
YEAR = {2006},
OPTKEY = {},
OPTVOLUME = {},
NUMBER = {24},
SERIES = {Advances in Information Security},
URL = {http://www.cs.chalmers.se/~dave/VisBook},
OPTNOTE = {},
OPTANNOTE = {}
}
@INPROCEEDINGS{Sabelfeld:Sands:CSFW05,
TITLE = {Dimensions and Principles of Declassification},
AUTHOR = {Andrei Sabelfeld and David Sands},
BOOKTITLE = {Proceedings of the 18th IEEE Computer Security
Foundations Workshop},
YEAR = {2005},
PUBLISHER = {IEEE Computer Society Press},
PAGES = {255--269},
ABSTRACT = { Computing systems often deliberately release (or declassify)
sensitive information. A principal security concern for systems
permitting information release is whether this release is safe: is
it possible that the attacker compromises the information release
mechanism and extracts more secret information than intended?
While the security community has recognised the importance of the
problem, the state-of-the-art in information release is,
unfortunately, a number of approaches with somewhat unconnected
semantic goals. We provide a road map of the main directions of
current research, by classifying the basic goals according to
\emph{what} information is released, \emph{who} releases
information, \emph{where} in the system information is released and
\emph{when} information can be released.
With a general declassification framework as a long-term goal, we
identify some prudent \emph{principles} of declassification. These
principles shed light on existing definitions and may also
serve as useful ``sanity checks'' for emerging models.},
PDF = {http://www.cs.chalmers.se/~dave/papers/sabelfeld-sands-CSFW05.pdf}
}
@INPROCEEDINGS{Hedin:Sands:Timing,
AUTHOR = {D. Hedin and D. Sands},
TITLE = {Timing Aware Information Flow Security for a
{J}ava{C}ard-like Bytecode},
BOOKTITLE = {First Workshop on Bytecode Semantics, Verification,
Analysis and Transformation (BYTECODE '05)},
YEAR = 2005,
SERIES = {Electronic Notes in Theoretical Computer Science (to
appear)},
ABSTRACT = {Common protection mechanisms fail to provide \emph{end-to-end} security;
programs with legitimate access to secret information are not prevented from
leaking this to the world. Information-flow aware analyses track the flow of
information through the program to prevent such leakages, but often ignore
information flows through \emph{covert channels} even though they pose a
serious threat. A typical covert channel is to use the timing of certain
events to carry information. We present a timing-aware information-flow type
system for a low-level language similar to a non-trivial subset of a sequential
Java bytecode. The type system is parameterized over the time model of the
instructions of the language and over the algorithm enforcing low-observational
equivalence, used in the prevention of implicit and timing flows.},
PDF = {http://www.cs.chalmers.se/~dave/papers/Hedin-Sands-BYTECODE05.pdf}
}
@INPROCEEDINGS{DHS05,
AUTHOR = {\'{A}. Darvas and R. H{\"{a}}hnle and D.
Sands},
TITLE = {A Theorem Proving Approach to Analysis of Secure
Information Flow},
BOOKTITLE = {Proc.\ 2nd International Conference on Security in
Pervasive Computing},
PAGES = {193--209},
URL = {http://www.springerlink.com/link.asp?id=rdqa8ejctda3yw64},
YEAR = {2005},
EDITOR = {Dieter Hutter and Markus Ullmann},
VOLUME = {3450},
SERIES = {LNCS},
PUBLISHER = {Springer-Verlag},
ABSTRACT = {Most attempts at analysing secure information flow in programs
are based on domain-specific logics. Though computationally feasible,
these approaches suffer from the need for abstraction and the high
cost of building dedicated tools for real programming languages. We recast
the information flow problem in a general program logic rather than
a problem-specific one. We investigate the feasibility of this approach by
showing how a general purpose tool for software verification can be used
to perform information flow analyses. We are able to prove security and
insecurity of programs including advanced features such as method calls,
loops, and object types for the target language Java Card. In addition,
we can express declassification of information},
PDF = {http://www.cs.chalmers.se/~dave/papers/Sands-SPC05.pdf}
}
@INPROCEEDINGS{Mantel:Sands:APLAS04,
AUTHOR = {H. Mantel and D. Sands},
BOOKTITLE = {Proc. Asian Symp. on Programming Languages and Systems},
TITLE = {Controlled Declassification based on Intransitive Noninterference},
YEAR = 2004,
MONTH = nov,
VOLUME = 3302,
PAGES = {129--145},
SERIES = {LNCS},
PUBLISHER = {Springer-Verlag},
ABSTRACT = {%
Traditional noninterference cannot cope with common features of secure
systems like channel control, information filtering, or explicit
downgrading. Recent research has addressed the derivation and use of
weaker security conditions that could support such features in a
language-based setting. However, a fully satisfactory solution to the
problem has yet to be found. A key problem is to permit exceptions to
a given security policy without permitting too much. In this article,
we propose an approach that draws its underlying ideas from
\emph{intransitive noninterference}, a concept usually used on a more
abstract specification level. Our results include a new
bisimulation-based security condition that controls tightly where
downgrading can occur and a sound security type system for checking
this condition.},
PDF = {http://www.cs.chalmers.se/~dave/papers/Mantel-Sands-APLAS04.pdf}
}
@TECHREPORT{Mantel:Sands:TR04,
AUTHOR = {H. Mantel and D. Sands},
TITLE = {Controlled Declassification based on Intransitive Noninterference},
YEAR = 2004,
MONTH = nov,
NUMBER = {2004-06},
INSTITUTION = {Chalmers University of Technology and G{\"o}teborg University},
SERIES = {Technical Reports in Computing Science at Chalmers University of Technology and G{\"o}teborg University},
NOTE = {Technical Reports in Computing Science at Chalmers University of Technology and G\"oteborg University.
Extended version of \cite{Mantel:Sands:APLAS04}},
PDF = {http://www.cs.chalmers.se/~dave/papers/Mantel-Sands-TR04.pdf}
}
@INPROCEEDINGS{DHS03,
AUTHOR = {{\'A}. Darvas and R. H{\"a}hnle and D. Sands},
TITLE = {A Theorem Proving Approach to Analysis of Secure Information
Flow (preliminary version)},
BOOKTITLE = {Workshop on Issues in the Theory of Security, WITS},
OPTPAGES = {},
NOTE = {Subsumed by \cite{DHS05}},
YEAR = {2003},
EDITOR = {Roberto Gorrieri},
ORGANIZATION = {IFIP WG 1.7, ACM SIGPLAN and GI FoMSESS}
}
@ARTICLE{MSC-SCP,
AUTHOR = {A. Moran and D. Sands and M. Carlsson},
TITLE = {Erratic fudgets: a semantic theory for an embedded coordination language},
JOURNAL = {Science of Computer Programming},
VOLUME = {46},
NUMBER = {1-2},
YEAR = {2003},
ISSN = {0167-6423},
PAGES = {99--135},
DOI = {10.1016/S0167-6423(02)00088-6},
PUBLISHER = {Elsevier North-Holland, Inc.},
PDF = {http://www.cs.chalmers.se/~dave/papers/Moran-Sands-Carlsson-SCP.pdf},
ABSTRACT = {
The powerful abstraction mechanisms of functional programming languages
provide the means to develop domain-specific programming languages within the
language itself. Typically, this is realised by designing a set of
combinators (higher-order reusable programs) for an application area, and by
constructing individual applications by combining and coordinating individual
combinators. This paper is concerned with a successful example of such an
embedded programming language, namely Fudgets, a library of combinators for
building graphical user interfaces in the lazy functional language Haskell.
The Fudget library has been used to build a number of substantial
applications, including a web browser and a proof editor interface to a proof
checker for constructive type theory. This paper develops a semantic theory
for the non-deterministic stream processors that are at the heart of the
Fudget concept. The interaction of two features of stream processors makes the
development of such a semantic theory problematic:
(i) the sharing of computation provided by the lazy evaluation mechanism
of the underlying host language, and
(ii) the addition of non-deterministic choice needed to handle the
natural concurrency that reactive applications entail.
We demonstrate that this combination of features in a higher-order functional
language can be tamed to provide a tractable semantics theory and induction
principles suitable for reasoning about contextual equivalence of Fudgets.}
}
@INCOLLECTION{Sands:Lambda02,
AUTHOR = {D. Sands and J. Gustavsson and A. Moran},
TITLE = {Lambda Calculi and Linear Speedups},
BOOKTITLE = {The Essence of Computation: Complexity, Analysis, Transformation. Essays Dedicated to Neil D. Jones},
PUBLISHER = {Springer-Verlag},
YEAR = 2002,
NUMBER = 2566,
SERIES = {LNCS},
ABSTRACT = {The equational theories at the core of most functional programming
are variations on the standard lambda calculus. The best known
of these is the call-by-value lambda calculus whose core is the
value-beta computation rule.
This paper investigates the transformational power of this core theory of
functional programming. The main result is that the equational theory
of the call-by-value lambda calculus cannot speed up (or slow down)
programs by more than a constant factor. The corresponding result also
holds for call-by-need but we show that it does not hold for call-by-name:
there are programs for which a single beta reduction can change
the program's asymptotic complexity.},
PDF = {http://www.cs.chalmers.se/~dave/papers/Sands-Gustavsson-Moran.pdf}
}
@PROCEEDINGS{Sands:2001:PLS,
EDITOR = {David Sands},
BOOKTITLE = {Programming languages and systems: 10th European
Symposium on Programming, {ESOP} 2001, held as part of
the Joint European Conferences on Theory and Practice
of Software, {ETAPS} 2001, Genova, Italy, April 2--6,
2001: proceedings},
TITLE = {Programming languages and systems: 10th European
Symposium on Programming, {ESOP} 2001, held as part of
the Joint European Conferences on Theory and Practice
of Software, {ETAPS} 2001, Genova, Italy, April 2--6,
2001: proceedings},
VOLUME = {2028},
PUBLISHER = {Springer-Verlag Inc.},
ADDRESS = {New York, NY, USA},
PAGES = {xiii + 431},
YEAR = {2001},
ISBN = {3-540-41862-8 (paperback)},
LCCN = {QA76.6 .E976 2001; QA267.A1 L43 no.2028},
BIBDATE = {Thu Jan 17 11:49:19 MST 2002},
SERIES = {Lecture Notes in Computer Science},
URL = {http://link.springer-ny.com/link/service/series/0558/tocs/t2028.htm},
KEYWORDS = {computer programming -- congresses; programming
languages (electronic computers) -- congresses}
}
@INPROCEEDINGS{Agat:Sands:Confidentiality,
AUTHOR = {J. Agat and D. Sands},
TITLE = {On Confidentiality and Algorithms},
PAGES = {64--77},
EDITOR = {Francis M. Titsworth},
BOOKTITLE = {Proceedings of the 2001 {IEEE} Symposium on Security
and Privacy ({S}\&{P}-01)},
MONTH = {May},
PUBLISHER = {IEEE Computer Society},
ABSTRACT = {Recent interest in methods for certifying programs for secure
information flow (noninterference)
have failed to raise a key question: can efficient algorithms
be written so as to satisfy the requirements of secure information flow?
In this paper we discuss how algorithms for searching and sorting can be
adapted to work on collections of secret data without leaking any
confidential information, either directly, indirectly,
or through timing behaviour.
We pay particular attention to the issue of timing channels caused by cache
behaviour, and argue that it is necessary to
disable the effect of the cache in order to
construct algorithms manipulating pointers to objects
in such a way that they satisfy the conditions of noninterference.
},
PDF = {http://www.cs.chalmers.se/~dave/papers/agat-sands-SP01.pdf},
YEAR = {2001}
}
@INPROCEEDINGS{Gustavsson:Sands:Possibilities,
AUTHOR = {J{\"o}rgen Gustavsson and David Sands},
TITLE = {Possibilities and Limitations of Call-by-Need Space
Improvement},
PAGES = {265--276},
MONTH = {September},
YEAR = {2001},
BOOKTITLE = {Proceedings of the Sixth {ACM} {SIGPLAN}
International Conference on Functional Programming
(ICFP'01)},
PUBLISHER = {ACM Press},
ABSTRACT = {
Innocent-looking program transformations can easily change the space
complexity of lazy functional programs. The theory of \emph{space
improvement} seeks to characterise those local program
transformations which are guaranteed never to worsen asymptotic space
complexity of any program. Previous work by the authors introduced the
space improvement relation and showed that a number of simple local
transformation laws are indeed space improvements. This paper seeks an
answer to the following questions: is the improvement relation inhabited
by interesting program transformations, and, if so, how might they be
established? We show that the asymptotic space improvement relation is
semantically badly behaved, but that the theory of \emph{strong space
improvement} possesses a fixed-point induction theorem which permits
the derivation of improvement properties for recursive definitions.
With the help of this tool we explore the landscape of space improvement by
considering a range of classical program transformations. },
PDF = {http://www.cs.chalmers.se/~dave/papers/PossibilitiesICFP01.pdf}
}
@INPROCEEDINGS{Sabelfeld:Sands:Probabilistic,
AUTHOR = {A.~Sabelfeld and D.~Sands},
TITLE = {Probabilistic Noninterference for Multi-threaded Programs},
BOOKTITLE = {Proceedings of the 13th IEEE Computer Security
Foundations Workshop},
PAGES = {200--214},
YEAR = {2000},
PUBLISHER = {IEEE Computer Society Press},
ADDRESS = {Cambridge, England},
MONTH = {July},
PDF = {http://www.cs.chalmers.se/~dave/papers/prob-sabelfeld-sands.pdf},
ABSTRACT = {
A program which has access to your sensitive data presents a
security threat. Does the program keep your secrets secret? To
answer the question one must specify what it means for a program to
be secure, and one may wish to verify the security specification
before executing the program. We present a probability-sensitive
security specification (probabilistic noninterference) for
multi-threaded programs based on a probabilistic
bisimulation. Some previous approaches to specifying
confidentiality rely on a particular scheduler for executing program
threads. This is unfortunate since scheduling policy is typically
outside the language specification for multi-threaded languages. We
describe how to generalise noninterference in order to define robust
security with respect to any particular scheduler used and show, for
a simple imperative language with dynamic thread creation, how the
security condition satisfies compositionality properties which
facilitates a straightforward proof of correctness of e.g. security
type systems.}
}
@INCOLLECTION{Moran:Sands:Carlsson:Erratic,
AUTHOR = {A. K. Moran and D. Sands and M. Carlsson},
TITLE = {Erratic {F}udgets: {A} semantic theory for an embedded
coordination language},
BOOKTITLE = {the Third International Conference on
Coordination Languages and Models; {COORDINATION}'99},
SERIES = {Lecture Notes in Computer Science},
NUMBER = {1594},
PAGES = {85--102},
PUBLISHER = {Springer-Verlag},
YEAR = {1999},
MONTH = {April},
NOTE = {Extended version available: \cite{MSC-SCP}}
}
@ARTICLE{Sabelfeld:Sands:Per,
AUTHOR = {A. Sabelfeld and D. Sands},
TITLE = {A Per Model of Secure Information Flow in Sequential Programs},
NOTE = {Extended version of \cite{Sabelfeld:Sands:ESOP99}},
JOURNAL = {Higher-Order and Symbolic Computation},
VOLUME = {14},
NUMBER = {1},
PAGES = {59--91},
MONTH = {March},
YEAR = {2001},
PDF = {http://www.cs.chalmers.se/~dave/papers/per-sabelfeld-sands.pdf},
ABSTRACT = { This paper proposes an extensional semantics-based formal
specification of secure information-flow properties in sequential
programs based on representing degrees of security by partial
equivalence relations (pers). The specification clarifies and
unifies a number of specific correctness arguments in the
literature, and connections to other forms of program analysis. The
approach is inspired by (and equivalent to) the use of partial
equivalence relations in specifying binding-time analysis, and is
thus able to specify security properties of higher-order functions
and ``partially confidential data''. We extend the approach to
handle nondeterminism by using powerdomain semantics and show how
\emph{probabilistic security properties} can be formalised by using
probabilistic powerdomain semantics.}
}
@INPROCEEDINGS{Moran:Sands:Need,
AUTHOR = {A. K. Moran and D. Sands},
TITLE = {Improvement in a Lazy Context: An Operational Theory for
Call-By-Need},
BOOKTITLE = {Proc. POPL'99, the 26th ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages},
YEAR = {1999},
MONTH = {January},
PUBLISHER = {ACM Press},
PAGES = {43--56},
PDF = {http://www.cs.chalmers.se/~dave/papers/cbneed-theory.pdf}
}
@INPROCEEDINGS{Sabelfeld:Sands:ESOP99,
AUTHOR = {A. Sabelfeld and D. Sands},
TITLE = {A Per Model of Secure Information Flow in Sequential Programs},
BOOKTITLE = {Programming Languages and Systems, 8th European Symposium on Programming, ESOP'99},
VOLUME = {1576},
SERIES = {Lecture Notes in Computer Science},
YEAR = {1999},
PUBLISHER = {Springer-Verlag},
PAGES = {40--58},
NOTE = {Extended version in \cite{Sabelfeld:Sands:Per}}
}
@INPROCEEDINGS{Gustavsson:Sands:Foundation,
AUTHOR = {J. Gustavsson and D. Sands},
TITLE = {A Foundation for Space-Safe Transformations of Call-by-Need Programs},
BOOKTITLE = {The Third International Workshop on Higher Order Operational Techniques in Semantics},
YEAR = {1999},
EDITOR = {A. D. Gordon and A. M. Pitts},
VOLUME = {26},
SERIES = {Electronic Notes in Theoretical Computer Science},
PUBLISHER = {Elsevier},
PDF = {http://www.cs.chalmers.se/~dave/papers/space.pdf},
URL = {http://www.elsevier.nl/locate/entcs/volume26.html},
ABSTRACT = {
We introduce a space-improvement relation on programs which guarantees
that whenever M is improved by N, replacement of M by N in a
program can never lead to asymptotically worse space (heap or stack)
behaviour, for a particular model of garbage collection. This study
takes place in the context of a call-by-need programming language. For
languages implemented using call-by-need, e.g, Haskell, space behaviour
is notoriously difficult to predict and analyse, and even
innocent-looking equivalences like x + y = y + x can change the
asymptotic space requirements of some programs. Despite this, we
establish a fairly rich collection of improvement laws, with the help of a
context lemma for a finer-grained improvement relation. We briefly
consider an application of the theory; we prove that inlining of
affine-linear bindings (as introduced by a certain class of ``used-once''
type-systems) is work- and space-safe. We also show that certain weaker
type systems for usage do not provide sufficient conditions for
space-safe inlining.
}
}
@INPROCEEDINGS{Claessen:Sands:Observable,
AUTHOR = {K. Claessen and D. Sands},
TITLE = {Observable Sharing for Functional Circuit Description},
BOOKTITLE = {Advances in Computing Science ASIAN'99; 5th Asian Computing Science Conference},
PAGES = {62--73},
EDITOR = {P.S. Thiagarajan and R. Yap},
VOLUME = {1742},
SERIES = {Lecture Notes in Computer Science},
YEAR = {1999},
PUBLISHER = {Springer-Verlag},
NOTE = {Extended Version Available},
PDF = {http://www.cs.chalmers.se/~dave/papers/observable-sharing.pdf},
ABSTRACT = {
Pure functional programming languages have been proposed as a vehicle to
describe, simulate and manipulate circuit specifications. We propose an
extension to Haskell to solve a standard problem when manipulating data
types representing circuits in a lazy functional language. The problem is
that circuits are finite graphs -- but viewing them as an algebraic (lazy)
datatype makes them indistinguishable from potentially infinite regular
trees. However, implementations of Haskell do indeed represent cyclic
structures by graphs. The problem is that the sharing of nodes that creates
such cycles is not observable by any function which traverses such a
structure. In this paper we propose an extension to call-by-need languages
which makes graph sharing observable. The extension is based on non
updatable reference cells and an equality test (sharing detection) on this
type. We show that this simple and practical extension has well-behaved
semantic properties, which means that many typical source-to-source program
transformations, such as might be performed by a compiler, are still valid
in the presence of this extension.}
}
@UNPUBLISHED{Moran:Sands:Need-Extended,
AUTHOR = {A. K. Moran and D. Sands},
TITLE = {Improvement in a Lazy Context: An Operational Theory for
Call-By-Need (Extended Version)},
NOTE = {Extended version of \cite{Moran:Sands:Need}},
YEAR = {1998},
MONTH = {November},
PDF = {http://www.cs.chalmers.se/~dave/papers/cbneed-theory-extended.pdf},
ABSTRACT = {The standard implementation technique for lazy functional languages is
call-by-need, which ensures that an argument to a function in any given call
is evaluated at most once. A significant problem with call-by-need is that it
is difficult - even for compiler writers - to predict the effects of program
transformations. The traditional theories for lazy functional languages are
based on call-by-name models, and offer no help in determining which
transformations do indeed optimize a program.
In this article we present an operational theory for call-by-need, based upon
an improvement ordering on programs: M is improved by N if in all
program-contexts C, when C[M] terminates then C[N] terminates at least as
cheaply.
We show that this improvement relation satisfies a "context lemma", and
supports a rich inequational theory, subsuming the call-by-need lambda calculi
of Ariola \emph{et al}. The reduction-based call-by-need calculi are
inadequate as a theory of lazy-program transformation since they only permit
transformations which speed up programs by at most a constant factor (a claim
we substantiate); we go beyond the various reduction-based calculi for
call-by-need by providing powerful proof rules for recursion, including
syntactic continuity - the basis of fixed-point-induction style reasoning, and
an Improvement Theorem, suitable for arguing the safety and correctness of
recursion-based program transformations.
}
}
@INCOLLECTION{Sands:Computing,
AUTHOR = {D. Sands},
TITLE = {Computing with Contexts: {A} simple approach},
BOOKTITLE = {Second Workshop on Higher-Order Operational Techniques in
Semantics (HOOTS II)},
EDITOR = {A. D. Gordon and A. M. Pitts and C. L. Talcott},
SERIES = {Electronic Notes in Theoretical Computer Science},
YEAR = {1998},
VOLUME = {10},
PUBLISHER = {Elsevier Science Publishers B.V.},
URL = {http://www.elsevier.nl/cas/tree/store/tcs/free/noncas/pc/menu.htm},
ABSTRACT = {This article describes how the use of a higher-order syntax
representation of contexts [due to A. Pitts] combines smoothly with
higher-order syntax for evaluation rules, so that definitions can be
extended to work over contexts. This provides "for free" - without the
development of any new language-specific context calculi - evaluation rules
for contexts which commute with hole-filling. We have found this to be a
useful technique for directly reasoning about operational equivalence. A
small illustration is given based on a unique fixed-point induction
principle for a notion of guarded context in a functional language.}
}
@ARTICLE{TCS::HankinMS1998,
TITLE = {Refining multiset transformers},
AUTHOR = {Chris Hankin and Daniel Le M{\'e}tayer and David
Sands},
PAGES = {233--258},
JOURNAL = {Theoretical Computer Science},
MONTH = "20~" # feb,
YEAR = {1998},
VOLUME = {192},
NUMBER = {2},
PDF = {http://www.cs.chalmers.se/~dave/papers/Hankin-LeMetayer-Sands-TCS.pdf},
ABSTRACT = {
Gamma is a minimal language based on local multiset rewriting with an
elegant chemical reaction metaphor. The virtues of this paradigm in terms
of systematic program construction and design of parallel programs have
been argued in previous papers. Gamma can also be seen as a notation for
coordinating independent programs in a larger application. In this paper,
we study a notion of refinement for programs involving parallel and
sequential composition operators, and derive a number of programming
laws. The calculus thus obtained is applied in the development of a generic
"pipelining" transformation, which enables certain sequential compositions
to be refined into parallel compositions.
}
}
@INPROCEEDINGS{Sands:Weichert:From,
AUTHOR = {D. Sands and M. Weichert},
TITLE = {From {G}amma to {CBS}:
Refining multiset transformations with broadcasting processes},
MONTH = {January},
YEAR = {1998},
BOOKTITLE = {Proc. of the 31st Hawaii Intl. Conf. on System Sciences},
VOLUME = {VII Software Technology Track},
PAGES = {265--274},
EDITOR = {H. El-Rewini},
ORGANIZATION = {{IEEE} Computer Soc.},
ADDRESS = {Hawai`i},
PDF = {http://www.cs.chalmers.se/~dave/papers/Weichert:Sands98.pdf}
}
@INCOLLECTION{Sands:HOOTS,
AUTHOR = {D. Sands},
TITLE = {Improvement Theory and Its Applications},
PAGES = {275--306},
EDITOR = {A. D. Gordon and A. M. Pitts},
BOOKTITLE = {Higher {O}rder {O}perational {T}echniques in {S}emantics},
PUBLISHER = {Cambridge University Press},
SERIES = {Publications of the Newton Institute},
YEAR = {1998},
ABSTRACT = {An improvement theory is a variant of the standard
theories of observational approximation (or equivalence) in which the basic
observations made of a functional program's execution include some
intensional information about, for example, the program's computational cost.
One program is an improvement of another if its execution is more efficient
in any program context. In this article we give an overview of our work on
the theory and applications of improvement. Applications include reasoning
about time properties of functional programs, and proving the correctness of
program transformation methods. We also introduce a new application, in the
form of some bisimulation-like proof techniques for equivalence, with
something of the flavour of Sangiorgi's ``bisimulation up-to expansion and
context''.},
PDF = {http://www.cs.chalmers.se/~dave/papers/hoots97.pdf}
}
@INPROCEEDINGS{Sands:POPL97,
AUTHOR = {D. Sands},
TITLE = {From {SOS} Rules to Proof Principles: An Operational
Metatheory for Functional Languages},
BOOKTITLE = {Proceedings of
the 24th Annual ACM SIGPLAN-SIGACT Symposium on
Principles of Programming Languages (POPL)},
MONTH = {January},
YEAR = {1997},
PUBLISHER = {ACM Press},
ABSTRACT = {Structural Operational Semantics (SOS) is a widely used
formalism for specifying the computational meaning of programs, and is
commonly used in specifying the semantics of functional languages. Despite
this widespread use there has been relatively little work on the
``metatheory'' for such semantics. As a consequence the operational approach
to reasoning is considered \emph{ad hoc} since the same basic proof
techniques and reasoning tools are reestablished over and over, once for each
operational semantics specification. This paper develops some metatheory for
a certain class of SOS language specifications for functional languages. We
define a rule format, Globally Deterministic SOS (GDSOS), and establish some
proof principles for reasoning about equivalence which are sound for all
languages which can be expressed in this format. More specifically, if the
SOS rules for the operators of a language conform to the syntax of the GDSOS
format, then (i) a syntactic analogy of continuity holds, which relates a
recursive function to its finite unwindings, and forms the basis of a
Scott-style fixed-point induction technique; (ii) a powerful induction
principle called improvement induction holds for a certain class of
instrumented GDSOS semantics; (iii) a useful bisimulation-based coinductive
proof technique for operational approximation (and its ``instrumented''
variants) is sound.},
PDF = {http://www.cs.chalmers.se/~dave/papers/sands-POPL97.pdf}
}
@ARTICLE{Sands:TCS,
AUTHOR = {D. Sands},
TITLE = {Proving the
Correctness of Recursion-Based Automatic Program
Transformations},
JOURNAL = {Theoretical Computer Science},
VOLUME = {167},
NUMBER = {10},
MONTH = oct,
YEAR = {1996},
NOTE = {Preliminary version in TAPSOFT'95, LNCS 915},
ABSTRACT = {This paper shows how the {\em Improvement Theorem}---a semantic
condition for establishing the total correctness of
program transformation on higher-order functional programs---has practical
value in proving the correctness of automatic techniques.
To this end we develop and study a
family of automatic program transformations. The root of this family
is a well-known and widely studied
transformation called {\em deforestation}; descendants include
generalisations to
richer input languages (e.g.\ higher-order functions),
and more powerful transformations, including a source-level representation of
some of the techniques known from Turchin's {\em supercompiler}.
},
PDF = {http://www.cs.chalmers.se/~dave/papers/sands-TCS96.pdf}
}
@ARTICLE{Sands:TOPLAS,
AUTHOR = {D. Sands},
TITLE = {Total Correctness by Local Improvement in the Transformation
of Functional Programs},
JOURNAL = {ACM Transactions on Programming Languages and Systems
(TOPLAS)},
YEAR = {1996},
VOLUME = {18},
NUMBER = {2},
MONTH = mar,
PAGES = {175--234},
NOTE = {Extended version of \cite{Sands:POPL}},
SUMMARY = {The goal of program transformation is to
improve efficiency while preserving meaning. One of the
best-known transformation techniques is Burstall and
Darlington's unfold-fold method. Unfortunately the
unfold-fold method itself guarantees neither improvement
in efficiency nor total correctness. The correctness
problem for unfold-fold is an instance of a strictly more
general problem: transformation by locally
equivalence-preserving steps does not necessarily
preserve (global) equivalence. This article presents a
condition for the total correctness of transformations on
recursive programs, which, for the first time, deals with
higher-order functional languages (both strict and
nonstrict) including lazy data structures. The main
technical result is an improvement theorem which
says that if the local transformation steps are guided by
certain optimization concerns (a fairly natural condition
for a transformation), then correctness of the
transformation follows. The improvement theorem makes
essential use of a formalized improvement theory; as a
rather pleasing corollary it also guarantees that the
transformed program is a formal improvement over the
original. The theorem has immediate practical
consequences: it is a powerful tool for proving the
correctness of existing transformation methods for
higher-order functional programs, without having to
ignore crucial factors such as memoization or
folding, and it yields a simple syntactic method
for guiding and constraining the unfold-fold method in
the general case so that total correctness (and
improvement) is always guaranteed. },
PDF = {http://www.cs.chalmers.se/~dave/papers/sands-TOPLAS96.pdf},
OPTANNOTE = {}
}
@INCOLLECTION{Sands:Composed,
AUTHOR = {D. Sands},
TITLE = {Composed Reduction Systems},
PAGES = {360--377},
BOOKTITLE = {Coordination
Programming: Mechanisms, Models and Semantics},
EDITOR = {J-M Andreoli and C. Hankin and D. Le~{M\'etayer}},
YEAR = {1996},
PUBLISHER = {IC Press, World Scientific},
SUMMARY = {This article studies composed reduction systems:
systems of programs built up from the reduction relations of some reduction
system, by means of parallel and sequential composition operators. The
Calculus of Gamma Programs previously studied by Hankin et al are an instance
of these systems in the case where the reduction relations are a certain
class of multi-set rewriting relations. The trace-based compositional
semantics of composed reduction systems is considered, and a new
graph-representation is introduced as an intermediate program form which
seems well-suited to the study of compositional semantics and program
logics.},
PDF = {http://www.cs.chalmers.se/~dave/papers/red2.pdf}
}
@incollection{Sands:Syntactic,
  author    = {D. Sands},
  title     = {Syntactic Continuity from Structural Operational Semantics},
  booktitle = {Theory and Formal Methods 1996: Proceedings
               of the Third Imperial College Workshop on Theory and Formal Methods},
  editor    = {G. McCusker and A. Edalat and S. Jourdan},
  publisher = {IC Press},
  year      = {1996},
  note      = {(subsumed by \cite{Sands:POPL97})}
}
@ARTICLE{Sands:JLC,
AUTHOR = {David Sands},
TITLE = {A Na\"{\i}ve Time Analysis and its
Theory of Cost Equivalence},
JOURNAL = {Journal of Logic and Computation},
YEAR = {1995},
VOLUME = {5},
NUMBER = {4},
PAGES = {495--541},
ABSTRACT = {Techniques for reasoning about extensional properties of
functional programs are well-understood, but methods for analysing the
underlying intensional, or operational properties have been much neglected.
This paper begins with the development of a simple but useful
calculus for time analysis of non-strict functional programs with lazy
lists. One limitation of this basic calculus is that the ordinary
equational reasoning on functional programs is not valid. In order to buy
back some of these equational properties we develop a non-standard
operational equivalence relation called {\em cost equivalence}, by
considering the number of computation steps as an ``observable'' component
of the evaluation process. We define this relation by analogy with Park's
definition of bisimulation in {\sc ccs}. This formulation allows us to show
that cost equivalence is a contextual congruence (and thus is substitutive
with respect to the basic calculus) and provides useful proof techniques
for establishing cost-equivalence laws.
It is shown that basic evaluation time can be derived
by demonstrating a certain form of cost equivalence, and we give a
axiomatisation of cost equivalence which complete is with respect to
this application. This shows that cost equivalence subsumes the basic
calculus.
Finally we show how a new operational interpretation of evaluation
demands can be used to provide a smooth interface between this time
analysis and more compositional approaches, retaining the advantages
of both.},
PDF = {http://www.cs.chalmers.se/~dave/papers/JLC-FINAL-VERSION.pdf}
}
@INPROCEEDINGS{Henglein:Sands:PLILP95,
AUTHOR = {Henglein, Fritz and Sands, David},
YEAR = {1995},
TITLE = {A Semantic Model of Binding Times for Safe Partial
Evaluation},
BOOKTITLE = {Programming Languages: Implementations, Logics and
Programs (PLILP'95)},
EDITOR = {Swierstra, S.D. and Hermenegildo, M.},
PUBLISHER = {Springer-Verlag},
SERIES = {Lecture Notes in Computer Science},
VOLUME = {982},
PAGES = {299--320},
KEYWORDS = {binding time, ideal model, partial evaluation,
safety, topped domains, monovariance},
SUMMARY = {We develop a semantic model of binding times that is
sufficient to guarantee {\em safety} of a large class of partial evaluators.
In particular, we
\begin{itemize}
\item identify problems of existing models of binding-time properties
based on projections and partial equivalence relations (PER{}s),
which imply that they are not adequate to prove the safety of
simple off-line partial evaluators;
\item
propose a new model for binding times that avoids the potential pitfalls of
projections(PER{}s);
\item
specify binding-time annotations justified by a ``collecting'' semantics,
and clarify the connection between extensional properties (local analysis)
and program annotations (global analysis) necessary to support binding-time
analysis;
\item
prove the safety of a simple but liberal class of
monovariant partial evaluators for a higher-order functional language
with recursive types, based on annotations justified by the model.
\end{itemize}
This is the extended version of the PLILP conference article, including all
relevant proofs.},
PDF = {http://www.cs.chalmers.se/~dave/papers/henglein-sands.pdf}
}
@INPROCEEDINGS{Sands:TAPSOFT,
AUTHOR = {D. Sands},
TITLE = {Correctness of Recursion-Based Automatic Program
Transformations},
NUMBER = {915},
SERIES = {Lecture Notes in Computer Science},
BOOKTITLE = {International Joint Conference on Theory and Practice of
Software Development ({TAPSOFT/FASE} '95)},
YEAR = {1995},
PUBLISHER = {Springer-Verlag},
NOTE = {Full version in \cite{Sands:TCS}}
}
@INPROCEEDINGS{Sands:POPL95,
AUTHOR = {D. Sands},
TITLE = {Total Correctness by Local Improvement in Program
Transformation},
BOOKTITLE = {Proceedings of
the 22nd Annual ACM SIGPLAN-SIGACT Symposium on
Principles of Programming Languages (POPL)},
YEAR = {1995},
PUBLISHER = {ACM Press},
MONTH = jan,
NOTE = {Extended version in \cite{Sands:TOPLAS}}
}
@INPROCEEDINGS{Sands:Higher,
AUTHOR = {D. Sands},
TITLE = {Higher-Order Expression Procedures},
BOOKTITLE = {Proceedings of the ACM SIGPLAN Symposium on Partial
Evaluation
and Semantics-Based Program Manipulation, PEPM'95},
YEAR = {1995},
PUBLISHER = {ACM},
ADDRESS = {New York},
PAGES = {190--201},
ABSTRACT = {Summary: We investigate the soundness of a specialisation technique due to Scherlis, expression procedures, in the context of a higher-order
non-strict language. An expression procedure is a generalised procedure construct which provides a means of expressing contextual properties of
functions, and thereby facilitates the manipulation and contextual specialisation of programs. Generalised programs provide a route to the correct
specialisation of recursive functions, are transformed by means of three basic transformation rules: composition, application and abstraction. In this
paper we show that the expression procedure approach is correct for call-by-name evaluation, including lazy data structures and higher order
functions.
},
URL = {http://www.diku.dk/research-groups/topps/bibliography/1995.html#D-223},
PDF = {http://www.cs.chalmers.se/~dave/papers/ep2.pdf}
}
@INPROCEEDINGS{Sands:Towards,
AUTHOR = {D. Sands},
TITLE = {Towards Operational Semantics of Contexts in Functional Languages},
NUMBER = {NS-94-6},
SERIES = {{BRICS} Notes Series},
BOOKTITLE = {Proceedings of the 6th Nordic Workshop on Programming
Theory},
PAGES = {378--385},
YEAR = {1994},
ADDRESS = {Aarhus, Denmark},
ABSTRACT = {We consider operational semantics of contexts (terms with
holes) in the setting of lazy functional languages, with the aim of
providing a balance between operational and compositional reasoning, and a
framework for semantics-based program analysis and manipulation.},
PDF = {http://www.cs.chalmers.se/~dave/papers/Sands-NWP94.pdf}
}
@INPROCEEDINGS{Sands:TFM,
SEMNO = {D-172},
AUTHOR = {D. Sands},
TITLE = {Laws of Parallel Synchronised Termination},
BOOKTITLE = {Theory and Formal Methods 1993:
Proceedings of the First Imperial College,
Department of Computing,
Workshop on Theory and Formal Methods},
EDITOR = {G.L. Burn and S.J. Gay and M.D. Ryan},
MONTH = mar,
YEAR = {1993},
ADDRESS = {Isle of Thorns, UK},
PUBLISHER = {Springer-Verlag},
SERIES = {Workshops in Computing Series},
ABSTRACT = {The salient feature of the composition operators for
Gamma programs is that for termination, the parallel composition operator
demands that its operands must terminate synchronously. This paper studies
the inequational partial correctness properties of the combination of
sequential and parallel composition operators for Gamma programs, provable
from a particular compositional semantics (Brookes-style transition traces)
and shows that the ``residual program'' input-output laws originally
described by Hankin {\em et al.\/} are also verified by the model.}
}
@INPROCEEDINGS{Sands:93:AParallelProgrammingStyle,
AUTHOR = {C. Hankin and D. Le M\'{e}tayer and D. Sands},
TITLE = {A Parallel Programming Style and its Algebra
of Programs},
BOOKTITLE = {Proceeding of Parallel Architectures and Languages
Europe ({PARLE})},
SEMNO = {D-165},
YEAR = {1993},
EDITOR = {A. Bode and M. Reeve and G. Wolf},
SERIES = {Lecture Notes in Computer Science},
VOLUME = {694},
PAGES = {367--378},
PUBLISHER = {Springer-Verlag},
KEYWORDS = {Parallel Programming Languages: Language Constructs,
Semantics.},
ABSTRACT = {We present a set of primitive program schemes,
which together with just two
basic combining forms provide a surprisingly expressive parallel programming
language. The primitive program schemes (called {\em tropes}) take the
form of parameterised conditional rewrite rules, and the computational
model is a variant of the {\em Gamma}\/ style, in which computation
proceeds by nondeterministic local rewriting of a global multiset.
We consider a number of examples which illustrate the use of tropes and we
study the algebraic properties of the sequential and parallel combining
forms. Using the examples we illustrate the application of these
properties in the verification of some simple program transformations.},
PDF = {http://www.cs.chalmers.se/~dave/papers/Sands-PARLE93.pdf}
}
@inproceedings{Sands:FMP93,
  semno     = {D-171},
  author    = {D. Sands},
  title     = {A Compositional Semantics of Combining Forms
               for {G}amma Programs},
  booktitle = {Formal Methods in
               Programming and Their Applications, International
               Conference, Academgorodok, Novosibirsk, Russia, June/July 1993.},
  editor    = {D. Bj{\"o}rner and M. Broy and I. Pottosin},
  series    = {Lecture Notes in Computer Science},
  publisher = {Springer-Verlag},
  year      = {1993},
  pages     = {43--56},
  optaddress = {},
  optmonth  = {},
  optnote   = {},
  summary   = {The Gamma model is a minimal programming language based on local
multiset rewriting (with an elegant chemical reaction metaphor);
Hankin {\em et al\/} derived a calculus of Gamma programs built
from basic reactions and two composition operators, and
applied it to the study of relationships between parallel and
sequential program composition, and related program transformations.
The main shortcoming of the ``calculus of Gamma programs''
is that the refinement and
equivalence laws described are not compositional, so that a refinement
of a sub-program does not necessarily imply a refinement of the
program.
In this paper we address this problem by
defining a compositional (denotational) semantics for
Gamma, based on the {\em transition trace\/} method of Brookes,
and by showing how this can be used to verify substitutive
refinement laws, potentially widening the applicability and scalability
of program transformations previously described.
The compositional semantics is also useful in the study of
relationships between alternative combining forms at a deeper semantic
level. We consider the semantics and properties of
a number of new combining forms for the Gamma
model.
}
}
@TECHREPORT{Hankin:Metayer:Sands:Calculus,
AUTHOR = {C. Hankin and D. Le M\'{e}tayer and D. Sands},
TITLE = {A Calculus of {G}amma Programs},
INSTITUTION = {INRIA, Rennes},
MONTH = oct,
YEAR = {1992},
PAGES = {31 pages},
TYPE = {INRIA Research Report},
NUMBER = {1758},
NOTE = {(Also: Imperial College Technical Report DOC 92/22)}
}
@INPROCEEDINGS{Hankin:Metayer:Sands:CalculusP,
AUTHOR = {C. Hankin and D. Le M\'{e}tayer and D. Sands},
TITLE = {A Calculus of {G}amma Programs},
OPTCROSSREF = {},
OPTKEY = {},
EDITOR = {U. Banerjee and D. Gelernter and A. Nicolau and D. Padua},
OPTVOLUME = {},
NUMBER = {757},
SERIES = {Lecture Notes in Computer Science},
PAGES = {342--355},
BOOKTITLE = {Languages and Compilers
for Parallel Computing, 5th International Workshop},
YEAR = {1992},
OPTORGANIZATION = {},
PUBLISHER = {Springer-Verlag},
OPTADDRESS = {},
MONTH = aug,
OPTNOTE = {},
OPTANNOTE = {}
}
@INPROCEEDINGS{Sands:FSTandTCS,
AUTHOR = {D. Sands},
TITLE = {Time Analysis, Cost Equivalence and Program Refinement},
BOOKTITLE = {Proceedings of the Eleventh Conference on Foundations
of Software Technology and Theoretical Computer Science},
YEAR = {1991},
PAGES = {25--39},
PUBLISHER = {Springer-Verlag},
NUMBER = {560},
SERIES = {Lecture Notes in Computer Science},
MONTH = dec,
NOTE = {See \cite{Sands:JLC} for a much extended and revised version.}
}
@INPROCEEDINGS{Hunt:Sands:PEPM91,
AUTHOR = {S. Hunt and D. Sands},
TITLE = {{Binding} {Time} {Analysis}: {A} {New} {PER}spective},
BOOKTITLE = {Proceedings of the {ACM}
Symposium on Partial Evaluation and Semantics-Based Program Manipulation
(PEPM'91)},
YEAR = {1991},
MONTH = sep,
PAGES = {154--164},
NOTE = {ACM SIGPLAN Notices 26(9)},
ABSTRACT = {Given a description of the parameters in a program that
will be known at
partial evaluation time, a {binding time analysis} must determine
which parts of the program are dependent solely on these known parts
(and therefore also known at partial evaluation time).
In this paper a
{binding time analysis} for the {simply typed lambda calculus} is
presented. The analysis takes the form of an {abstract interpretation}
and uses a novel formalisation of the problem of binding time analysis,
based on the use of {partial equivalence relations}. A simple
proof of correctness is achieved by the use of {logical relations}.
},
PDF = {http://www.cs.chalmers.se/~dave/papers/Hunt:Sands:PEPM91.pdf}
}
@INPROCEEDINGS{Sands:Skye,
AUTHOR = {D. Sands},
TITLE = {Operational Theories of Improvement in Functional Languages
(Extended Abstract)},
BOOKTITLE = {Proceedings of the Fourth {G}lasgow Workshop on
Functional Programming},
YEAR = {1991},
SERIES = {Workshops in Computing Series},
PUBLISHER = {Springer-Verlag},
ADDRESS = {Skye},
PAGES = {298--311},
MONTH = aug,
ABSTRACT = {In this paper we address the technical foundations essential to the
aim of providing a semantic basis for the formal treatment of relative
efficiency in functional languages. For a general class of
{``functional''} computation systems, we define a family of improvement
preorderings which express, in a variety of ways, when one expression
is more efficient than another. The main results of this paper build
on Howe's study of equality in lazy computation systems, and are
concerned with the question of when a given improvement relation is
subject to the usual forms of (in)equational reasoning (so that, for
example, we can improve an expression by improving any
sub-expression). For a general class of computation systems we
establish conditions on the operators of the language which guarantee
that an improvement relation is a precongruence. In addition, for a
particular higher-order nonstrict functional language, we show that
any improvement relation which satisfies a simple monotonicity
condition with respect to the rules of the operational semantics has
the desired congruence property.
},
PDF = {http://www.cs.chalmers.se/~dave/papers/Sands:Skye.pdf}
}
@PHDTHESIS{Sands:PhDthesis,
AUTHOR = {D. Sands},
TITLE = {Calculi for Time Analysis of Functional Programs},
SCHOOL = {Department of Computing, Imperial College},
YEAR = {1990},
ADDRESS = {University of London},
MONTH = sep,
ABSTRACT = {Techniques for reasoning about extensional properties of
functional programs are well-understood, but methods for analysing the
underlying intensional, or operational properties have been much neglected.
This thesis presents the development of several calculi for time analysis
of functional programs.
We focus on two features, higher-order functions and lazy evaluation, which
contribute much to the expressive power and semantic elegance of
functional languages, but serve to make operational properties more
opaque.
Analysing higher-order functions is problematic because complexity is
dependent not only on the cost of computing, but also on the cost of {\em
applying}\ function-valued expressions. Techniques for statically deriving
programs which compute time-cost in the presence of arbitrary higher-order
functions are developed. The key to this process is the introduction of
syntactic structures called {\em cost-closures}, which enable intensional
properties to be carried by functions. The approach is formalised by the
construction of an appropriate cost-model, against which the correctness of
the derivation is proved. A specific factorisation tactic for reasoning
about higher-order functions out of context is illustrated.
Reasoning about lazy evaluation (ie call-by-name, or more usually,
call-by-need) is problematic because the cost of evaluating an expression
cannot be understood simply from the costs of its sub-expressions. A
direct calculus for reasoning about a call-by-name language with lazy lists
is derived from a simple operational model. In order to extend this
calculus with a restricted form of equational reasoning, a nonstandard
notion of operational approximation called {\em cost-simulation} is
developed, by analogy with {\em (bi)simulation} in CCS.
The problem with calculi of the above form, based directly on an
operational model, is that they do not yield a {\em compositional}
description of cost, and cannot model {\em lazy evaluation}
(graph-reduction) easily. We show how a description of the {\em context} in
which a function is evaluated can be used to parameterise two types of
time-equation: {\em sufficient-time} equations and {\em necessary-time}
equations, which together provide bounds on the exact time-cost of lazy
evaluation. This approach is extended to higher-order functions using a
modification of the cost-closure technique.},
PDF = {http://www.cs.chalmers.se/~dave/papers/PhDthesis.pdf}
}
@INPROCEEDINGS{Sands:ESOP90,
AUTHOR = {D. Sands},
TITLE = {Complexity Analysis for a Lazy Higher-Order Language},
BOOKTITLE = {Proceedings of the Third European Symposium on Programming},
PUBLISHER = {Springer-Verlag},
YEAR = {1990},
MONTH = may,
PAGES = {361--376},
NUMBER = {432},
SERIES = {Lecture Notes in Computer Science},
ABSTRACT = {This paper is concerned with the time-analysis of functional programs.
Techniques which enable us to reason formally
about a program's execution costs have had relatively little attention
in the study of functional programming. We concentrate here on the
construction of
equations which compute the time-complexity of expressions
in a lazy higher-order language.
The problem with higher-order functions is that complexity is dependent on
the cost of applying functional parameters. Structures called {\em
cost-closures} are introduced to allow us to model both functional
parameters {\em and} the cost of their application.
The problem with laziness is that complexity is dependent on {\em context}. Projections are used to characterise the context in
which an expression is evaluated, and cost-equations are parameterised by this
context-description to give a compositional time-analysis.
Using this form of context information we introduce two types
of time-equation:
{\em sufficient-time} equations and {\em necessary-time} equations,
which together provide bounds on the exact time-complexity.
},
PDF = {http://www.cs.chalmers.se/~dave/papers/Sands:ESOP90.pdf}
}
@INPROCEEDINGS{Sands:GFPW,
AUTHOR = {D. Sands},
TITLE = {Complexity Analysis for a Lazy Higher-Order Language},
BOOKTITLE = {Proceedings of the Glasgow Workshop on Functional Programming},
PUBLISHER = {Springer-Verlag},
PAGES = {56--79},
SERIES = {Workshops in Computing Series},
MONTH = aug,
YEAR = {1989},
NOTE = {Extended Version of \cite{Sands:ESOP90}}
}
@TECHREPORT{Sands:88,
AUTHOR = {D. Sands},
TITLE = {Complexity Analysis for a Higher Order Language},
INSTITUTION = {Imperial College},
NUMBER = {DOC 88/14},
MONTH = oct,
YEAR = {1988},
ABSTRACT = {The aim of program transformation is to increase efficiency whilst
preserving meaning. Techniques which enable us to reason formally
about a program's execution costs are therefore potentially useful
for assisting the program transformation process.
This paper is concerned with the time-analysis of functional programs.
We show how techniques for deriving equations which compute the
time-complexity of a functional program can be extended to
handle arbitrary higher-order functions.
The derivation yields a functional program, is mechanisable, and thus forms
the basis for an analysis tool for higher-order languages.}
}
The articles presented above are made available to ensure timely dissemination of scholarly and technical work. Copyright and all rights therein are retained by authors or by other copyright holders. All persons copying this information are expected to adhere to the terms and constraints of the respective copyright holder. In most cases, these works may not be reposted without the explicit permission of the copyright holder.