From: Mark Tarver
Subject: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1184718293.805544.171340@o11g2000prd.googlegroups.com>
\Jon suggested that it would be good to implement some significant
programs in different functional languages for comparison.  He
suggested interpreters for procedural languages like Basic.

QUOTE
There seems to be a great deal of interest from the functional
programming community in benchmarking. ..... I would also like to see
regexps, program evaluators (rewriters, interpreters and compilers)
and other benchmarks
UNQUOTE

Ok; here's a response - let's see if people want to try.

***********************************************************
The task is to implement an interpreter for a language Minim and run a
stock Minim program that adds two numbers x and y together where x =
100000 (10^5) and y = 100000 (10^5) and give the times.
***********************************************************

Minim is a very basic language - fairly close to assembly. Minim can

1. assign number constants to variables
2. assign the value of a variable to a variable
3. decrement or increment a variable
4. compare two values (numbers or variables) by >, < or =.
5. perform if then else tests
6. jump to a tag
7. print a string
8. print a value (i.e. a number or the value of a variable)
9. input a number value from a user into a variable
10. print a new line
11. do AND, NOT and OR boolean tests

Here's Minim Syntax in BNF with comments

<program> := <statement>
             | <statement> <program>;
<statement> := <assignment>
                | <conditional>
                | <goto>
                | <tag>;
                | <print>
                | <input>
<assignment> := (<var> is <val>)     { assign a value to a variable }
                | (++ <var>)         { increment a variable }
                | (-- <var>);        { decrement a variable }
<val> := <constant> | <var>;
<var> := any symbol;
<constant> := any number
<conditional> := (if <test> then <statement> else <statement>);
<test> := (<val> <comp> <val>)
          | (<test> and <test>);     { boolean AND}
          | (<test> or <test>)       {boolean OR}
          | (not <test>);            {boolean NOT}
<comp> := > | < | =;
<goto> := (goto <tag>);              {go to}
<tag> := any symbol
<print> := (print <string>) | (print <val>); nl;  {nl is new line}
<input> := (input <var>);               {input the users response to
var}
<string> := any string;

Here's the stock program to add two numbers together in Minim -
designed to run here under Qi. You should be able to follow it.

[      [print "Add x and y"]
       nl
       [print "Input x: "]
       [input x]
       nl
       [print "Input y: "]
       [input y]
       main
       [if [x = 0] then [goto end] else [goto sub1x]]

       sub1x
       [-- x]
       [++ y]
       [goto main]

       end
       nl
       [print "The total of x and y is "]
       [print y]
       nl]

A Qi Solution
_____________

Here's a type secure implementation of an interpreter for Minim in Qi.
The type theory encapsulates the BNF and is 54 lines of sequent
calculus.\

\ Type synonyms: a program is a list of statements; an environment is an
  association list of (symbol * number) pairs mapping variables to values. \
(synonyms program [statement]
          env [(symbol * number)])

\ statement: the type of Minim statements. The sequent rules mirror the BNF
  above: assignment (Var is Val), increment and decrement (++ and --),
  if-then-else, goto, print of a string or a value, input into a variable,
  and a bare symbol acting as a jump tag. \
(datatype statement

   Var : symbol; Val : val;
   =========================
   [Var is Val] : statement;

   if (element? Op [++ --])
   Var : symbol;
   =====================
   [Op Var] : statement;

   Test : test; DoThis : statement; DoThat : statement;
   ====================================================
   [if Test then DoThis else DoThat] : statement;

   Tag : symbol;
   ======================
   [goto Tag] : statement;

   Message : string-or-val;
   ============================
   [print Message] : statement;

   Message : string;
   _________________
   Message : string-or-val;

   Message : val;
   _________________
   Message : string-or-val;

   Var : symbol;
   =========================
   [input Var] : statement;

   Tag : symbol;
   _____________
   Tag : statement;)

\ test: the type of Minim boolean tests - a comparison of two values
  by =, > or <, the binary connectives and and or, and negation. \
(datatype test

   if (element? Comp [= > <])
   Val1 : val; Val2: val;
   ======================
   [Val1 Comp Val2] : test;

   if (element? LogOp [and or])
   Test1 : test;
   Test2 : test;
   =============
   [Test1 LogOp Test2] : test;

   Test : test;
   ==================
   [not Test] : test;)

\ val: a value is either a number constant or a symbol naming a variable.
  The two verified rules let the number? and symbol? guards used in the
  interpreter refine the type of a matched object. \
(datatype val

   ______________________________________
   (number? N) : verified >> N : number;

   _______________________________________
   (symbol? S) : verified >> S : symbol;

   Val : symbol;
   _______________
   Val : val;

   Val : number;
   _____________
   Val : val;)

\The program that runs Minim programs is 56 lines of Qi and is given
here.\

\ run: execute a Minim program starting from an empty environment.
  The program is passed twice to run-loop: once as the work list and
  once as the complete program for goto to jump within. Returns the
  final environment of variable bindings. \
(define run
   {program --> env}
    Program -> (run-loop Program Program []))

\ run-loop: interpret the remaining statements. The complete program is
  threaded through as the second argument so that goto can restart from
  any tag (see go); Env is the association list of variable bindings.
  NOTE(review): clause order is significant - the bare-tag clause is
  guarded by symbol?, and the print clause for variables (symbol? M)
  must precede the literal print clause. The line breaks inside clauses
  below reproduce the original post and look like mail wrapping - confirm
  against the poster's file before loading. \
(define run-loop
   {program --> program --> env --> env}
    [] _ Env -> Env
    [nl | Ss] Program Env -> (do (output "~%") (run-loop Ss Program
Env))
    [Tag | Ss] Program Env -> (run-loop Ss Program Env)	where (symbol?
Tag)
    [[goto Tag] | _] Program Env -> (run-loop (go Tag Program) Program
Env)
    [[Var is Val] | Ss] Program Env
    -> (run-loop Ss Program (change-env Var (compute-val Val Env)
Env))
    [[++ Var] | Ss] Program Env
    -> (run-loop Ss Program (change-env Var (+ 1 (look-up Var Env))
Env))
    [[-- Var] | Ss] Program Env
    -> (run-loop Ss Program (change-env Var (- (look-up Var Env) 1)
Env))
    [[if Test then DoThis else DoThat] | Ss] Program Env
     -> (if (perform-test? Test Env)
            (run-loop [DoThis | Ss] Program Env)
            (run-loop [DoThat | Ss] Program Env))
    [[print M] | Ss] Program Env -> (do (output "~A" (look-up M Env))
                                        (run-loop Ss Program Env))
										where (symbol? M)
    [[print M] | Ss] Program Env -> (do (output "~A" M)
                                        (run-loop Ss Program Env))
    [[input Var] | Ss] Program Env
       -> (run-loop Ss Program (change-env Var (input+ : number)
Env)))

\ compute-val: a number constant evaluates to itself; a variable
  (a symbol) is looked up in the environment. \
(define compute-val
  {val --> env --> number}
   N _ -> N	where (number? N)
   Var Env -> (look-up Var Env)	where (symbol? Var))

\ go: return the tail of the program immediately following the first
  occurrence of Tag; raises an error if the tag does not occur. \
(define go
  {symbol --> program --> program}
   Tag [Tag | Program] -> Program
   Tag [_ | Program] -> (go Tag Program)
   Tag _ -> (error "cannot go to tag ~A~%" Tag))

\ perform-test?: evaluate a Minim boolean test against the environment.
  Compound tests (and, or, not) recurse; comparisons evaluate both
  sides with compute-val and compare the numbers. \
(define perform-test?
  {test --> env --> boolean}
   [Test1 and Test2] Env -> (and (perform-test? Test1 Env)
                                 (perform-test? Test2 Env))
   [Test1 or Test2] Env -> (or (perform-test? Test1 Env)
                               (perform-test? Test2 Env))
   [not Test] Env -> (not (perform-test? Test Env))
   [V1 = V2] Env -> (= (compute-val V1 Env) (compute-val V2 Env))
   [V1 > V2] Env -> (> (compute-val V1 Env) (compute-val V2 Env))
   [V1 < V2] Env -> (< (compute-val V1 Env) (compute-val V2 Env)))

\ change-env: bind Var to Val in the environment, overwriting an
  existing binding in place or appending a fresh one at the end. \
(define change-env
   {symbol --> number --> env --> env}
    Var Val [] -> [(@p Var Val)]
    Var Val [(@p Var _) | Env] -> [(@p Var Val) | Env]
    Var Val [Binding | Env] -> [Binding | (change-env Var Val Env)])

\ look-up: fetch the value bound to Var in the environment;
  raises an error if Var is unbound. \
(define look-up
  {symbol --> env --> number}
   Var [] -> (error "~A is unbound.~%" Var)
   Var [(@p Var Val) | _] -> Val
   Var [_ | Env] -> (look-up Var Env))

\Here is a trial run -

NB: This is run under CLisp which is *much* slower than SBCL.  My
version of SBCL (1.0) for Windows is rather neurotic and I've had to
choose the slower but more stable CLisp.  This means I've probably
lost out by a factor of 4 (at a guess).

Qi 2007, Copyright (C) 2001-2007 Mark Tarver
www.lambdassociates.org
version 9.0 (Turbo-E)


(0-) (tc +)
true

(1+) (turbo +)
true : boolean

(2+) (load "minim.txt")
compiled : unit
statement : unit
test : unit
val : unit
run : ((list statement) --> (list (symbol * number)))
run-loop :
((list statement) -->
 ((list statement) -->
  ((list (symbol * number)) --> (list (symbol * number)))))
compute-val : (val --> ((list (symbol * number)) --> number))
go : (symbol --> ((list statement) --> (list statement)))
perform-test? : (test --> ((list (symbol * number)) --> boolean))
change-env :
(symbol -->
 (number --> ((list (symbol * number)) --> (list (symbol * number)))))
look-up : (symbol --> ((list (symbol * number)) --> number))
typechecked in 22217 inferences.

Real time: 0.875 sec.
Run time: 0.859375 sec.
Space: 11044772 Bytes
GC: 21, GC time: 0.140625 sec.
loaded : symbol

(3+) (time (run [
       [print "Add x and y"]
       nl
       [print "Input x: "]
       [input x]
       nl
       [print "Input y: "]
       [input y]
       main
       [if [x = 0] then [goto end] else [goto sub1x]]

       sub1x
       [-- x]
       [++ y]
       [goto main]

       end
       nl
       [print "The total of x and y is "]
       [print y]
       nl]))
Add x and y
Input x: 100000

Input y: 100000

The total of x and y is 200000

Real time: 12.15625 sec.
Run time: 2.125 sec.
Space: 7210116 Bytes
GC: 14, GC time: 0.03125 sec.
[(@p x 0) (@p y 200000)] : (list (symbol * number))

(4+)

This whole post is a commented Qi program so you can load it into Qi.

Mark \

From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xir8iz2s8.fsf@ruckus.brouhaha.com>
Mark Tarver <··········@ukonline.co.uk> writes:
> The task is to implement an interpreter for a language Minim and run a
> stock Minim program that adds two numbers x and y together where x =
> 100000 (10^5) and y = 100000 (10^5) and give the times.

This is too easy to game.  Think of the obvious Lisp approach of
translating the Minim program into an S-expression and evaluating it.
Now think of an evaluator that automatically invokes an optimizing
compiler and memoizes the resulting machine code.  You see where this
leads.
From: Mark Tarver
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1184841922.045792.199330@w3g2000hsg.googlegroups.com>
On 18 Jul, 06:41, Paul Rubin <·············@NOSPAM.invalid> wrote:
> Mark Tarver <··········@ukonline.co.uk> writes:
> > The task is to implement an interpreter for a language Minim and run a
> > stock Minim program that adds two numbers x and y together where x =
> > 100000 (10^5) and y = 100000 (10^5) and give the times.
>
> This is too easy to game.  Think of the obvious Lisp approach of
> translating the Minim program into an S-expression and evaluating it.
> Now think of an evaluator that automatically invokes an optimizing
> compiler and memoizes the resulting machine code.  You see where this
> leads.

Ah, but thats a compiler Paul, not an interpreter.

Actually, thats easy in Lisp because Lisp includes many 'impure'
procedural constructions.  I doubt it would be so easy to do in a pure
functional language.

Mark
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f7of3r$4l8$1@online.de>
Mark Tarver schrieb:
> Actually, thats easy in Lisp because Lisp includes many 'impure'
> procedural constructions.  I doubt it would be so easy to do in a pure
> functional language.

Quite to the contrary.
They don't have to do aliasing or dataflow analysis to do that kind of 
optimization. I'd expect that kind of optimization to happen far more 
early in the development cycle of a compiler, and that it will stay more 
aggressive throughout the compiler's lifetime.

Regards,
Jo
From: Mark Tarver
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1184967807.567415.65700@w3g2000hsg.googlegroups.com>
On 19 Jul, 20:47, Joachim Durchholz <····@durchholz.org> wrote:
> Mark Tarver schrieb:
>
> > Actually, thats easy in Lisp because Lisp includes many 'impure'
> > procedural constructions.  I doubt it would be so easy to do in a pure
> > functional language.
>
> Quite to the contrary.
> They don't have to do aliasing or dataflow analysis to do that kind of
> optimization. I'd expect that kind of optimization to happen far more
> early in the development cycle of a compiler, and that it will stay more
> aggressive throughout the compiler's lifetime.
>
> Regards,
> Jo

\I don't reckon so; Lisp is a very easy target for *compiling* Minim.
Here's the code which runs under the Qi environment.  It takes a Minim
program in one file and outputs the corresponding Lisp to be LOADed
into Qi.\

\ compile-minim-to-lisp: read a Minim program from file In and write
  the corresponding Lisp to file Out - the statements compiled one by
  one and wrapped in TAGBODY (for GO), BLOCK and TIME - ready to be
  LOADed into Qi. NOTE(review): the break inside read-file below is a
  mail-wrapping artifact in the original post; rejoin before loading. \
(define compile-minim-to-lisp
  In Out -> (write-to-file Out
               [TIME [BLOCK [] [TAGBODY | (map compile-statement (read-
file In))]]]))

\ compile-statement: translate one Minim statement into the
  corresponding Lisp form. Jump tags fall through unchanged in the
  final clause, since TAGBODY treats bare symbols as labels. \
(define compile-statement
  [++ V] -> [INCF V]
  [-- V] -> [DECF V]
  [goto Tag] -> [GO Tag]
  [if X then Y else Z] -> [if (compile-test X)
                              (compile-statement Y)
                              (compile-statement Z)]
  [input V] -> [SETQ V [READ]]
  [print Message] -> [FORMAT T "~A" Message]
  [Var is Val] -> [SETQ Var Val]
  nl -> [TERPRI]
  Tag -> Tag)

\ compile-test: translate a Minim boolean test into the equivalent
  Lisp expression; atoms pass through unchanged in the final clause. \
(define compile-test
  [X > Y] -> [> X Y]
  [X = Y] -> [= X Y]
  [X < Y] -> [< X Y]
  [P and Q] -> [and (compile-test P) (compile-test Q)]
  [P or Q] -> [or (compile-test P) (compile-test Q)]
  [not P] -> [not (compile-test P)]
  P -> P)

\The compiled output of this program uses Qi booleans but can be
LOADED into Qi just like any other Lisp program.

(14-) (compile-minim-to-lisp "f.txt" "g.txt")
"g.txt"

(15-) (COMPILE-FILE "g.txt")
P"C:Documents and Settings/User/My Documents/Qi 9.0/g.fas"

(16-) (LOAD "g")

Add x and y
Input x: 100000

Input y: 100000

The total of x and y is 200000

Real time: 12.140625 sec.
Run time: 0.046875 sec.

About 50X faster than my interpreter - even under CLisp.

It is easy for precisely the reason I gave; because Lisp includes
these impure procedural features as part of the language spec.

This is too trivial as a challenge problem and too biased to Lisp,
hence I didn't set it.

Mark\
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f7sgl3$jq8$1@online.de>
Mark Tarver schrieb:
> On 19 Jul, 20:47, Joachim Durchholz <····@durchholz.org> wrote:
>> Mark Tarver schrieb:
>>
>>> Actually, [evaluating expressions at compile time is] easy in
>>> Lisp because Lisp includes many 'impure' procedural
>>> constructions.  I doubt it would be so easy to do in a pure 
>>> functional language.
 >>
>> Quite to the contrary.
>> They don't have to do aliasing or dataflow analysis to do that kind of
>> optimization. I'd expect that kind of optimization to happen far more
>> early in the development cycle of a compiler, and that it will stay more
>> aggressive throughout the compiler's lifetime.
> 
> \I don't reckon so; Lisp is a very easy target for *compiling* Minim.

Now that's another topic.
I was specifically responding to the "preevaluate at compile time" bit, 
now you're talking about - what? Lisp being useful as a target language?

(Maybe I misunderstood your original statement.)

Regards,
Jo
From: Mark Tarver
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185021663.597540.18970@r34g2000hsd.googlegroups.com>
>  you're talking about - what? Lisp being useful as a target language?
>
> (Maybe I misunderstood your original statement.)

Exactly - that is what I was talking about.

Mark
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a1d6ba$0$1607$ed2619ec@ptn-nntp-reader02.plus.net>
Mark Tarver wrote:
> Run time: 0.046875 sec.
>
> About 50X faster than my interpreter - even under CLisp.

Then its performance is comparable to my OCaml interpreter (0.043s). Note
that this isn't surprising because this benchmark only tests a part of
CLisp that is about the size of my interpreter (scaled by the relative
verbosity of Lisp, of course).

> It is easy for precisely the reason I gave; because Lisp includes
> these impure procedural features as part of the language spec.

So you're saying Lisp might beat Haskell? That's great but don't forget: its
the taking part that counts.

> This is too trivial as a challenge problem and too biased to Lisp,
> hence I didn't set it.

I'm not sure that this is biased towards Lisp. I'd write a Minim -> C
compiler in OCaml...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5gh4ejF3h2e6mU1@mid.individual.net>
Mark Tarver wrote:
> On 18 Jul, 06:41, Paul Rubin <·············@NOSPAM.invalid> wrote:
>> Mark Tarver <··········@ukonline.co.uk> writes:
>>> The task is to implement an interpreter for a language Minim and run a
>>> stock Minim program that adds two numbers x and y together where x =
>>> 100000 (10^5) and y = 100000 (10^5) and give the times.
>> This is too easy to game.  Think of the obvious Lisp approach of
>> translating the Minim program into an S-expression and evaluating it.
>> Now think of an evaluator that automatically invokes an optimizing
>> compiler and memoizes the resulting machine code.  You see where this
>> leads.
> 
> Ah, but thats a compiler Paul, not an interpreter.

...and why would that matter?!?

[The following should be quite fast - but I haven't performed any test 
runs. I am interested in your results, though. It should be possible to 
squeeze out more by adding declarations...]


;; Property list mapping tag symbols to their continuation (the statement
;; list beginning at the tag), filled in as tags are passed during evaluation.
(defvar *tag-table* '())
;; Property list mapping Minim variable names to their current values.
(defvar *var-table* '())

(defun eval-statements (statement more-statements)
   "Interpret one Minim STATEMENT with MORE-STATEMENTS as its continuation.
Tags are memoized into *TAG-TABLE* as they are passed, so a backward GOTO
finds its target without rescanning; variables live in *VAR-TABLE*.
NOTE(review): a GOTO to a tag not yet passed searches only
MORE-STATEMENTS, not the whole program - the author's later version
(using *PROGRAM*) fixes this."
   (flet ((cont ()
            ;; Run the rest of the statement list, if any.
            (when more-statements
              (eval-statements
                (car more-statements)
                (cdr more-statements)))))
     (etypecase statement
       ;; A bare symbol is either the newline directive NL or a jump tag;
       ;; a tag records its continuation for later GOTOs, then falls through.
       (symbol (if (eq statement 'nl)
                 (terpri)
                 (setf (getf *tag-table* statement)
                       (cons statement more-statements)))
               (cont))
       (cons (case (car statement)
               (if (destructuring-bind
                       (<if> test <then> then <else> else) statement
                     (declare (ignore <if>))
                     (assert (and (eq <then> 'then) (eq <else> 'else)))
                     (if (eval-test test)
                       (eval-statements then more-statements)
                       (eval-statements else more-statements))))
               ;; GOTO abandons the current continuation and resumes at the
               ;; tag's recorded continuation (or scans forward for it).
               (goto (destructuring-bind
                         (<goto> tag) statement
                       (declare (ignore <goto>))
                       (let ((continuation
                               (or (getf *tag-table* tag)
                                   (member tag more-statements))))
                         (eval-statements
                           (car continuation) (cdr continuation)))))
               (print (destructuring-bind
                          (<print> value) statement
                        (declare (ignore <print>))
                        (princ (eval-value value))
                        (finish-output)
                        (cont)))
               (input (destructuring-bind
                          (<input> var) statement
                        (declare (ignore <input>))
                        (assert (symbolp var))
                        (setf (getf *var-table* var) (read))
                        (cont)))
               (++ (destructuring-bind
                       (<++> var) statement
                     (declare (ignore <++>))
                     (assert (symbolp var))
                     (incf (getf *var-table* var))
                     (cont)))
               (-- (destructuring-bind
                       (<--> var) statement
                     (declare (ignore <-->))
                     (assert (symbolp var))
                     (decf (getf *var-table* var))
                     (cont)))
               ;; Anything else must be (VAR1 is VAR2). NOTE(review): the
               ;; assert requires VAR2 to be a symbol, so a numeric constant
               ;; on the right of IS is rejected, unlike the Minim BNF.
               (otherwise (destructuring-bind
                              (var1 <is> var2) statement
                            (assert (and (symbolp var1)
                                         (eq <is> 'is)
                                         (symbolp var2)))
                            (setf (getf *var-table* var1)
                                  (getf *var-table* var2))
                            (cont))))))))

(defun eval-value (value)
  "Resolve a Minim VALUE: a symbol is looked up in *VAR-TABLE*;
any other atom (e.g. a number or string) denotes itself."
  (if (symbolp value)
      (getf *var-table* value)
      (progn
        (assert (atom value))
        value)))

(defun eval-test (test)
  "Evaluate a Minim boolean TEST against the current variable bindings.
Handles NOT, the comparisons > < =, and the connectives AND and OR."
  (cond
    ((eq (car test) 'not)
     (destructuring-bind (head negated) test
       (declare (ignore head))
       (not (eval-test negated))))
    (t
     (destructuring-bind (lhs op rhs) test
       (ecase op
         (>   (> (eval-value lhs) (eval-value rhs)))
         (<   (< (eval-value lhs) (eval-value rhs)))
         (=   (= (eval-value lhs) (eval-value rhs)))
         (and (and (eval-test lhs) (eval-test rhs)))
         (or  (or (eval-test lhs) (eval-test rhs))))))))

(defun eval-minim (statements)
  "Entry point: interpret a Minim program given as a list of STATEMENTS."
  (let ((head (first statements))
        (tail (rest statements)))
    (eval-statements head tail)))

;; Compiler macro for EVAL-MINIM: when the program argument is a quoted
;; constant, translate it at compile time into a PROG of native Lisp
;; (GO for goto, SETQ/INCF/DECF for assignment), collecting every
;; referenced variable into the PROG binding list. Calls with a
;; non-constant argument fall through to the interpreter above.
(define-compiler-macro eval-minim (&whole whole statements)
   (unless (and (consp statements) (eq (car statements) 'quote))
     (return-from eval-minim whole))
   (let ((variables '()))
     ;; TRANSLATE-STATEMENT / TRANSLATE-TEST / TRANSLATE-VALUE mirror the
     ;; interpreter's EVAL-* trio, but emit code instead of evaluating.
     (labels ((translate-statement (statement)
                (etypecase statement
                  (symbol (if (eq statement 'nl) '(terpri) statement))
                  (cons (case (car statement)
                          (if (destructuring-bind
                                  (<if> test <then> then <else> else)
                                  statement
                                (declare (ignore <if>))
                                (assert (and (eq <then> 'then)
                                             (eq <else> 'else)))
                                `(if ,(translate-test test)
                                   ,(translate-statement then)
                                   ,(translate-statement else))))
                          (goto (destructuring-bind
                                    (<goto> tag) statement
                                  (declare (ignore <goto>))
                                  `(go ,tag)))
                          (print (destructuring-bind
                                     (<print> value) statement
                                   (declare (ignore <print>))
                                   `(progn
                                      (princ ,(translate-value value))
                                      (finish-output))))
                          (input (destructuring-bind
                                     (<input> var) statement
                                   (declare (ignore <input>))
                                   (assert (symbolp var))
                                   (pushnew var variables)
                                   `(setq ,var (read))))
                          (++ (destructuring-bind
                                  (<++> var) statement
                                (declare (ignore <++>))
                                (assert (symbolp var))
                                (pushnew var variables)
                                `(incf ,var)))
                          (-- (destructuring-bind
                                  (<--> var) statement
                                (declare (ignore <-->))
                                (assert (symbolp var))
                                (pushnew var variables)
                                `(decf ,var)))
                          (otherwise (destructuring-bind
                                         (var1 <is> var2) statement
                                       (assert (and (symbolp var1)
                                                    (eq <is> 'is)
                                                    (symbolp var2)))
                                       (pushnew var1 variables)
                                       (pushnew var2 variables)
                                       `(setq ,var1 ,var2)))))))
              (translate-test (test)
                (if (eq (car test) 'not)
                  (destructuring-bind (<not> test) test
                    (declare (ignore <not>))
                    `(not ,(translate-test test)))
                  (destructuring-bind (arg1 op arg2) test
                    (ecase op
                      ((> < =) `(,op ,(translate-value arg1)
                                     ,(translate-value arg2)))
                      ((and or) `(,op ,(translate-test arg1)
                                      ,(translate-test arg2)))))))
              (translate-value (value)
                (when (symbolp value) (pushnew value variables))
                (assert (atom value)) value))
       ;; PROG binds all collected variables (initially NIL) and provides
       ;; the implicit TAGBODY that makes the bare tag symbols GO targets.
       (let ((new-statements
               (mapcar #'translate-statement (cadr statements))))
         `(prog ,variables ,@new-statements)))))

(defun test-minim ()
   "Run the stock Minim benchmark: read x and y, add them by repeated
decrement/increment, and print the total. Because the argument is a
quoted constant, the EVAL-MINIM compiler macro compiles it to native
Lisp when this definition is compiled."
   (eval-minim
    '((print "Add x and y")
      nl
      (print "Input x: ")
      (input x)
      nl
      (print "Input y: ")
      (input y)
      nl
      main
      (if (x = 0) then (goto end) else (goto sub1x))

      sub1x
      (-- x)
      (++ y)
      (goto main)

      end
      nl
      (print "The total of x and y is ")
      (print y)
      nl)))

;-)


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a37d77$0$1618$ed2619ec@ptn-nntp-reader02.plus.net>
Pascal Costanza wrote:
> ...and why would that matter?!?

Indeed.

> [The following should be quite fast - but I haven't performed any test
> runs. I am interested in your results, though. It should be possible to
> squeeze out more by adding declarations...]

By my measurements, this is 2-3x longer and twice as fast as the fastest
OCaml interpreter. By the looks of the code it is using a macro to
translate to Lisp and then compiling, in which case I am surprised the
performance is not better. I assume it is doing a very simple translation
to rather inefficient Lisp?

I'll code up an OCaml equivalent. Should be a good test of the new camlp4
although I'm not sure how to translate the gotos into OCaml...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5ghdvfF3fdejsU1@mid.individual.net>
Jon Harrop wrote:
> Pascal Costanza wrote:
>> ...and why would that matter?!?
> 
> Indeed.
> 
>> [The following should be quite fast - but I haven't performed any test
>> runs. I am interested in your results, though. It should be possible to
>> squeeze out more by adding declarations...]
> 
> By my measurements, this is 2-3x longer and twice as fast as the fastest
> OCaml interpreter. By the looks of the code it is using a macro to
> translate to Lisp and then compiling, in which case I am surprised the
> performance is not better. I assume it is doing a very simple translation
> to rather inefficient Lisp?

Yes. As I said, there are no type declarations in the generated code. It 
should be straightforward to declare all variables as integers, but then 
all incoming values from (input var) forms have to be type checked. If 
you insist, I can make the necessary changes.

What is neat about this solution here is that the interpreter is still 
available, so you can call eval-minim on computed values as well. The 
translation to Lisp code only occurs at compile time when the compiler 
macro sees a constant value for the program passed to eval-minim, so you 
get the advantages of both an interpreter and a compiler. This is a 
standard practice for optimizing code in Common Lisp, BTW. (To put it 
differently, compiler macros allow you to define ad hoc partial 
evaluations that can take advantage of domain-specific knowledge.)

It is possible to perform the translation to Lisp code on the fly, by 
performing manual dynamic compilation. Then the translation could also 
be done on computed values (but the runtime would be increased by the 
compilation time itself, so a real gain in overall efficiency is much 
harder to achieve this way). This would require a lot more code, though.

> I'll code up an OCaml equivalent. Should be a good test of the new camlp4
> although I'm not sure how to translate the gotos into OCaml...

A standard way to express gotos in functional languages is to turn each 
tag into a function which ends in a call to the immediately following 
tag. If your language is tail recursive, there is no real difference to 
a goto in that case.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5gjq6rF3g058gU1@mid.individual.net>
Pascal Costanza wrote:
> Jon Harrop wrote:
>> Pascal Costanza wrote:
>>> ...and why would that matter?!?
>>
>> Indeed.
>>
>>> [The following should be quite fast - but I haven't performed any test
>>> runs. I am interested in your results, though. It should be possible to
>>> squeeze out more by adding declarations...]
>>
>> By my measurements, this is 2-3x longer and twice as fast as the fastest
>> OCaml interpreter. By the looks of the code it is using a macro to
>> translate to Lisp and then compiling, in which case I am surprised the
>> performance is not better. I assume it is doing a very simple translation
>> to rather inefficient Lisp?
> 
> Yes. As I said, there are no type declarations in the generated code. It 
> should be straightforward to declare all variables as integers, but then 
> all incoming values from (input var) forms have to be type checked. 

Here is a new version to try out. Note that I declared the variables as 
fixnum, which means that they wrap around beyond a certain threshold. 
That number is implementation-dependent and is stored in the constant 
MOST-POSITIVE-FIXNUM. For example, in SBCL it's 536870911. If that's too 
small for your tests, change the *variable-type* parameter to 'integer 
and recompile. (In Common Lisp, integer is unlimited for correctness.) 
It's also fun to call (disassemble 'test-minim) after compilation (with 
any type).

I have also fixed the bug in the interpreter which I reported on before.

As an additional note: The code is roughly 2x longer than necessary 
because of the optimization added via the compiler macro. In a more 
realistic scenario, you would use the interpreter only, and only 
optimize the code as soon as you notice that there is a performance 
bottleneck here. In an even more realistic scenario, you would probably 
use a pure macro version from the start, but Mark wanted an interpreter, 
so there you go.


Cheers,
Pascal


;; Global interpreter state, bound/consulted by EVAL-STATEMENTS and friends.
(defvar *tag-table* '())  ; plist: tag symbol -> continuation (tag . rest-of-program)
(defvar *var-table* '())  ; plist: Minim variable symbol -> current value
(defvar *program*)        ; whole statement list; GOTO falls back to searching it

;;
;; change this parameter for different variable types
;;
;; Wrapped in EVAL-WHEN because the compiler macro below reads
;; *VARIABLE-TYPE* at macroexpansion (i.e. compile) time.
(eval-when (:compile-toplevel :load-toplevel :execute)
   (defparameter *variable-type* 'fixnum))


(defun eval-statements (statement more-statements)
  "Interpret one Minim STATEMENT, then continue with MORE-STATEMENTS.
A bare symbol is either NL (print a newline) or a tag, which is
memoized in *TAG-TABLE* as its continuation so a later GOTO finds it
without rescanning the program.  A cons dispatches on its head:
IF/GOTO/PRINT/INPUT/++/-- and the fall-through (VAR IS VAL) assignment."
  (flet ((cont () (when more-statements
                    (eval-statements
                      (car more-statements)
                      (cdr more-statements)))))
    (etypecase statement
      (symbol (if (eq statement 'nl)
                (terpri)
                ;; Tag: remember (tag . rest) so GOTO can resume here.
                (setf (getf *tag-table* statement)
                      (cons statement more-statements)))
              (cont))
      (cons (case (car statement)
              (if (destructuring-bind
                      (<if> test <then> then <else> else) statement
                    (declare (ignore <if>))
                    (assert (and (eq <then> 'then) (eq <else> 'else)))
                    (if (eval-test test)
                      (eval-statements then more-statements)
                      (eval-statements else more-statements))))
              (goto (destructuring-bind
                        (<goto> tag) statement
                      (declare (ignore <goto>))
                      ;; Use the memoized continuation when the tag was
                      ;; already passed; otherwise search *PROGRAM*.
                      (let ((continuation (or (getf *tag-table* tag)
                                              (member tag *program*))))
                        (eval-statements
                          (car continuation)
                          (cdr continuation)))))
              (print (destructuring-bind
                         (<print> value) statement
                       (declare (ignore <print>))
                       (princ (eval-value value))
                       (finish-output)
                       (cont)))
              (input (destructuring-bind
                         (<input> var) statement
                       (declare (ignore <input>))
                       (assert (symbolp var))
                       (setf (getf *var-table* var) (read))
                       (cont)))
              (++ (destructuring-bind
                      (<++> var) statement
                    (declare (ignore <++>))
                    (assert (symbolp var))
                    (incf (getf *var-table* var))
                    (cont)))
              (-- (destructuring-bind
                      (<--> var) statement
                    (declare (ignore <-->))
                    (assert (symbolp var))
                    (decf (getf *var-table* var))
                    (cont)))
              (otherwise (destructuring-bind
                             (var <is> val) statement
                           ;; Fix: the Minim grammar is (<var> is <val>)
                           ;; with <val> := <constant> | <var>.  The old
                           ;; code asserted (symbolp val), rejecting
                           ;; number constants.  EVAL-VALUE handles both
                           ;; (for a symbol it is exactly the old
                           ;; (getf *var-table* val) lookup).
                           (assert (and (symbolp var) (eq <is> 'is)))
                           (setf (getf *var-table* var)
                                 (eval-value val))
                           (cont))))))))

(defun eval-value (value)
  "Resolve a Minim <val>: a symbol names a variable and is looked up in
*VAR-TABLE*; any other atom (number, string) is a literal constant."
  (cond ((symbolp value) (getf *var-table* value))
        (t (assert (atom value))
           value)))

(defun eval-test (test)
  "Evaluate a Minim boolean form: (not <test>), a comparison
(<val> {> < =} <val>), or a connective (<test> {and or} <test>)."
  (case (first test)
    (not
     (destructuring-bind (<not> inner) test
       (declare (ignore <not>))
       (not (eval-test inner))))
    (otherwise
     (destructuring-bind (lhs op rhs) test
       (ecase op
         (>   (>  (eval-value lhs) (eval-value rhs)))
         (<   (<  (eval-value lhs) (eval-value rhs)))
         (=   (=  (eval-value lhs) (eval-value rhs)))
         (and (and (eval-test lhs) (eval-test rhs)))
         (or  (or  (eval-test lhs) (eval-test rhs))))))))

(defun eval-minim (statements)
  "Run the Minim program STATEMENTS (a list of statement forms),
binding *PROGRAM* so GOTO can locate tags anywhere in the program."
  (let* ((*program* statements)
         (head (car statements))
         (tail (cdr statements)))
    (eval-statements head tail)))

;; Compiler macro: when EVAL-MINIM receives a quoted constant program,
;; translate it at compile time into a PROG whose Minim variables become
;; local variables declared as *VARIABLE-TYPE*, with tags becoming PROG
;; labels and GOTO becoming GO.  Non-constant arguments fall through to
;; the ordinary interpreting function above.
(define-compiler-macro eval-minim (&whole whole statements)
   (unless (and (consp statements) (eq (car statements) 'quote))
     (return-from eval-minim whole))
   (let ((variables '()))
     (labels ((translate-statement (statement)
                (etypecase statement
                  ;; A bare symbol is NL or a tag; tags pass through
                  ;; unchanged and become PROG labels.
                  (symbol (if (eq statement 'nl) '(terpri) statement))
                  (cons (case (car statement)
                          (if (destructuring-bind
                                  (<if> test <then> then <else> else)
                                  statement
                                (declare (ignore <if>))
                                (assert (and (eq <then> 'then)
                                             (eq <else> 'else)))
                                `(if ,(translate-test test)
                                   ,(translate-statement then)
                                   ,(translate-statement else))))
                          (goto (destructuring-bind
                                    (<goto> tag) statement
                                  (declare (ignore <goto>))
                                  `(go ,tag)))
                          (print (destructuring-bind
                                     (<print> value) statement
                                   (declare (ignore <print>))
                                   `(progn
                                      (princ ,(translate-value value))
                                      (finish-output))))
                          (input (destructuring-bind
                                     (<input> var) statement
                                   (declare (ignore <input>))
                                   (assert (symbolp var))
                                   (pushnew var variables)
                                   ;; Values typed in by the user must be
                                   ;; checked against the declared type.
                                   (let ((input (gensym)))
                                     `(let ((,input (read)))
                                        (check-type
                                          ,input ,*variable-type*)
                                        (setq ,var ,input)))))
                          (++ (destructuring-bind
                                  (<++> var) statement
                                (declare (ignore <++>))
                                (assert (symbolp var))
                                (pushnew var variables)
                                `(incf ,var)))
                          (-- (destructuring-bind
                                  (<--> var) statement
                                (declare (ignore <-->))
                                (assert (symbolp var))
                                (pushnew var variables)
                                `(decf ,var)))
                          (otherwise (destructuring-bind
                                         (var <is> val) statement
                                       ;; Fix: per the grammar,
                                       ;; (<var> is <val>) allows a
                                       ;; constant as <val>; the old code
                                       ;; asserted (symbolp val).
                                       ;; TRANSLATE-VALUE accepts any atom
                                       ;; and registers symbols as
                                       ;; variables, as before.
                                       (assert (and (symbolp var)
                                                    (eq <is> 'is)))
                                       (pushnew var variables)
                                       `(setq ,var
                                              ,(translate-value val))))))))
              (translate-test (test)
                (if (eq (car test) 'not)
                  (destructuring-bind (<not> test) test
                    (declare (ignore <not>))
                    `(not ,(translate-test test)))
                  (destructuring-bind (arg1 op arg2) test
                    (ecase op
                      ((> < =) `(,op ,(translate-value arg1)
                                     ,(translate-value arg2)))
                      ((and or) `(,op ,(translate-test arg1)
                                      ,(translate-test arg2)))))))
              (translate-value (value)
                ;; Symbols denote Minim variables; anything else must be
                ;; an atomic literal.
                (when (symbolp value) (pushnew value variables))
                (assert (atom value)) value))
       (let ((new-statements
               (mapcar #'translate-statement (cadr statements))))
         ;; All Minim variables start at 0, matching GETF's behavior on
         ;; an empty table only loosely (GETF yields NIL); declared type
         ;; makes 0 the sensible initial value here.
         `(prog ,(loop for var in variables collect `(,var 0))
            (declare (optimize (speed 3) (safety 0) (debug 0)
                               (compilation-speed 0))
                     (,*variable-type* ,@variables))
            ,@new-statements)))))

;; Stock benchmark program: reads x and y, then moves x into y one unit
;; at a time via the MAIN/SUB1X loop, printing the total at the end.
;; Because the argument is a quoted constant, the compiler macro above
;; compiles this Minim program to native code at compile time.
(defun test-minim ()
   (eval-minim
    '((print "Add x and y")
      nl
      (print "Input x: ")
      (input x)
      nl
      (print "Input y: ")
      (input y)
      nl
      main
      ;; loop: while x > 0, decrement x and increment y
      (if (x = 0) then (goto end) else (goto sub1x))

      sub1x
      (-- x)
      (++ y)
      (goto main)

      end
      nl
      (print "The total of x and y is ")
      (print y)
      nl)))

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5gha3vF3ep00uU1@mid.individual.net>
Pascal Costanza wrote:
> Mark Tarver wrote:
>> On 18 Jul, 06:41, Paul Rubin <·············@NOSPAM.invalid> wrote:
>>> Mark Tarver <··········@ukonline.co.uk> writes:
>>>> The task is to implement an interpreter for a language Minim and run a
>>>> stock Minim program that adds two numbers x and y together where x =
>>>> 100000 (10^5) and y = 100000 (10^5) and give the times.
>>> This is too easy to game.  Think of the obvious Lisp approach of
>>> translating the Minim program into an S-expression and evaluating it.
>>> Now think of an evaluator that automatically invokes an optimizing
>>> compiler and memoizes the resulting machine code.  You see where this
>>> leads.
>>
>> Ah, but thats a compiler Paul, not an interpreter.
> 
> ...and why would that matter?!?
> 
> [The following should be quite fast - but I haven't performed any test 
> runs. I am interested in your results, though. It should be possible to 
> squeeze out more by adding declarations...]

...squeeze out more efficiency, I mean...

> (defvar *tag-table* '())

I just noticed that there is a bug in the handling of the *tag-table*, 
but that should be easy to fix...


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Mark Tarver
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1184854966.960019.62970@m37g2000prh.googlegroups.com>
QUOTE
Not meaning to be rude, but why are the Qi and PILS implementations so
slow?

This is just looping and incrementing 100,000 times and you guys are
getting
times ~1s? That means you're doing O(10,000) machine operations per
loop of
the minim code, which is just crazy.

Mathematica is the slowest language that I have access to and even it
only
takes 0.27s to complete this problem:

let x = ref 1000000
let y = ref 1000000

let () =
while !x>0 do
decr x;
incr y;
done;
Printf.printf "y=%d\n%!" !y

and running it using OCaml's bytecode interpreter takes only 0.004s so
it is
20x faster than my naive term-level interpreter, which sounds about
right.
I can't imagine what you're doing to make it run another 200 times
slower
though...
UNQUOTE

Come on Jon; that's trying it on :).  Of course if you take my Minim
program
and hand compile it into another language - even a slow one like
Mathematica
- it will run faster than my interpreter.  I can hand compile it into
procedural
Lisp and I guarantee it will be blazingly quick.  You can't make
meaningful comparisons
like that.

But you want a faster interpreter than the one I wrote - OK see my
next post.
(You'll undoubtedly see it before I do thanks to Google).

Nice try, but no cigar and early bedtime with no tea.

Mark

PS This has appeared in the right thread but not in exactly the right
place because
Google still thinks its yesterday and therefore Jon has not made any
such post as the
one to which I am replying which according to Google I will not reply
to until tomorrow.
Confused?  Don't worry about it.
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2007071913591111272-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-19 10:22:46 -0400, Mark Tarver <··········@ukonline.co.uk> said:

> Of course if you take my Minim
> program
> and hand compile it into another language - even a slow one like
> Mathematica
> - it will run faster than my interpreter.  I can hand compile it into
> procedural
> Lisp and I guarantee it will be blazingly quick.  You can't make
> meaningful comparisons
> like that.

You seem to have mistaken Jon for a legitimate, fair minded, 
correspondent to this newsgroup. He is a spammer trying to sell his 
ocaml and f# consulting services. He will never post anything unless it 
shows these two languages, from which he earns his living, in a 
favorable light, whether the comparison is fair or not.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <469f84f3$0$1608$ed2619ec@ptn-nntp-reader02.plus.net>
Mark Tarver wrote:
> Come on Jon; that's trying it on :).  Of course if you take my Minim
> program and hand compile it into another language - even a slow one like
> Mathematica - it will run faster than my interpreter.

I don't understand why you would think that.

> I can hand compile it into procedural Lisp and I guarantee it will be
> blazingly quick.  You can't make meaningful comparisons like that. 

These results are all for interpreters (no hand compiling, except the
hard-coded Minim program in your Qi):

Mark's Qi: 2s
Ole's PILS: 2s
Mathematica: 0.3s
Jon's OCaml: 0.08s

How do you explain the differences?

> But you want a faster interpreter than the one I wrote - OK see my
> next post.
> (You'll undoubtedly see it before I do thanks to Google).
> 
> Nice try, but no cigar and early bedtime with no tea.

I'm waiting. :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Mark Tarver
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1184883855.101754.108240@g4g2000hsf.googlegroups.com>
> These results are all for interpreters (no hand compiling, except the
> hard-coded Minim program in your Qi):
>
> Mark's Qi: 2s
> Ole's PILS: 2s
> Mathematica: 0.3s
> Jon's OCaml: 0.08s
>
> How do you explain the differences?

Ok; not majorly important, but the 2s is only for Qi under CLisp. Here
is Qi on an experimental SBCL.

This is experimental prerelease support for the Windows platform: use
at your own risk.  "Your Kitten of Death awaits!"

............................................

(3+) (time (run [
       [print "Add x and y"]
       nl
       [print "Input x: "]
       [input x]
       nl
       [print "Input y: "]
       [input y]
       main
       [if [x = 0] then [goto end] else [goto sub1x]]

       sub1x
       [-- x]
       [++ y]
       [goto main]

       end
       nl
       [print "The total of x and y is "]
       [print y]
       nl]))
Add x and y
Input x: 10000

Input y: 10000

The total of x and y is 20000
Evaluation took:
  6.75 seconds of real time
  0.03125 seconds of user run time
  0.0 seconds of system run time
  0 calls to %EVAL
  0 page faults and
  655,344 bytes consed.
[(@p x 0) (@p y 20000)] : (list (symbol * number))

Sadly the kitten of death gets me after this, because I suppose, its
an experimental port.   At the cost of much pain I could run this
under CMUCL, but I loathe using Linux and it's a drag installing 9.0
under my Ubuntu.  But project the above timing by x10 and you're
looking at a ball park figure of 0.3-0.4s under a well-configured
Lisp; that is at least 5 times faster than my 2.0s under CLisp.

But thats not the main issue.  The main thing is that you are
comparing hand-compiled equivalents to my Minim program, written in
native Mathematica and OCaml, to Qi *interpreting* a Minim program.
Not commensurable at all.

Now regarding my Minim program being 'hard-coded'.  I really don't
know what that means here.  Your Mathematica program is certainly hard-
coded.  The Minim program corresponds exactly to the BNF laid down -
its not tweaked at all.

Mark
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <JkTni.20948$j7.378750@news.indigo.ie>
Mark Tarver wrote:

> Sadly the kitten of death gets me after this, because I suppose, its
> an experimental port.  

Maybe.  Turns out that having (tc +) on in qi 7.3 will crash under sbcl
1.0.7 on linux on load of the source of your minim implementation on my
machine, with a joyful and rather unusual "Unhandled memory fault at
#x0."

> At the cost of much pain I could run this
> under CMUCL, but I loathe using Linux

Well, hey, I loathe using Windows.  Strange though.  Are you
sure you loathe using Linux?  Maybe you just loathe a 
particular desktop environment for Linux like GNOME?  If you have
ubuntu, try "apt-get install kubuntu-desktop" (or use the package
management GUI), and select a KDE session instead of a GNOME one upon
next login. 

> Now regarding my Minim program being 'hard-coded'.  I really 
> don't  know what that means here. 

Well, obviously you're not compiling the input expression into the
program which is what "hard coding" would usually suggest to me (like
jon hard coded the input expression into his ml program rather than
reading it at run time in the previous "simplify" example).  He may
have some issue with you supplying the minim program at runtime to the
interpreter as lists though. That's not what I'd call "hard coding".  It
is skipping lexing, or at least reusing the lisp reader's
[sorta-]lexing instead of writing your own, though.  There are of
course lexer packages for use in lisp around, though I haven't used
them much myself, e.g. Michael Parker's 
http://www.geocities.com/mparker762/clawk.html
From: Mark Tarver
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1184912250.401047.174720@r34g2000hsd.googlegroups.com>
Turns out that having (tc +) on in qi 7.3 will crash under sbcl
> 1.0.7 on linux on load of the source of your minim implementation on my
> machine, with a joyful and rather unusual "Unhandled memory fault at
> #x0."

Can't comment on this - CLisp compiles and runs it fine.
My 1.0.6 SBCL crashes worse than 1.0 with a different error message.
1.0 sometimes crashes while loading this program and sometimes it
loads it.  I think on balance of probability there is a problem in
SBCL somewhere.   This is why I generally use the slower but more
stable CLisp.  Try it.

>
> > At the cost of much pain I could run this
> > under CMUCL, but I loathe using Linux
>
> Are you> sure you loathe using Linux?  

OK; let me check .... hmmm, yes, yes I do.

> He may
> have some issue with you supplying the minim program at runtime to the
> interpreter as lists though. That's not what I'd call "hard coding".  

Agreed.

> It
> is skipping lexing, or at least reusing the lisp reader's
> [sorta-]lexing instead of writing your own, though.  

OK; that's a different point. Actually Qi has its own lexer and all
the input is going through that. I was not assuming that people needed
to implement a lexer, but that might be needed for certain languages
which be limited to string entry etc.  But my entry corresponds to the
BNF for Minim.

Mark
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a03e9a$0$1622$ed2619ec@ptn-nntp-reader02.plus.net>
Mark Tarver wrote:
> But thats not the main issue.  The main thing is that you are
> comparing hand-compiled equivalents to my Minim program, written in
> native Mathematica and OCaml, to Qi *interpreting* a Minim program.
> Not commensurable at all.

No, I'm comparing your interpreter to my interpreter (the one I posted).

> Now regarding my Minim program being 'hard-coded'.  I really don't
> know what that means here.  Your Mathematica program is certainly hard-
> coded.

Yes.

> The Minim program corresponds exactly to the BNF laid down - its not
> tweaked at all. 

You gave a BNF:

<program> := <statement>
             | <statement> <program>;
<statement> := <assignment>
                | <conditional>
                | <goto>
...

But it appears that your program does not include a lexer and parser,
instead having the Minim program hard-coded in the Qi source code:

(3+) (time (run [
       [print "Add x and y"]
       nl
       [print "Input x: "]
...

My interpreter loads its input program from the specified text file. I think
that is an important difference in terms of functionality. Perhaps I have
misunderstood.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Mark Tarver
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1184914065.254432.317740@q75g2000hsh.googlegroups.com>
> But it appears that your program does not include a lexer and parser,
> instead having the Minim program hard-coded in the Qi source code:
>
> (3+) (time (run [
>        [print "Add x and y"]
>        nl
>        [print "Input x: "]
> ...
>
> My interpreter loads its input program from the specified text file. I think
> that is an important difference in terms of functionality. Perhaps I have
> misunderstood.

See remark above, just put my line entry into a file and load it.  The
parsing is done by the Qi type checker based on the spec in my
interpreter program.

If you want to separate out the reading from the execution then define

(define run-minim
  {string --> symbol}
  File -> (time (load File)))

and put (run <your Minim program here>) into the file.

(6+) (run-minim "minim add.txt")
Add x and y
Input x: ... etc.

Hack my Minim program and put in an error (missing then)

[if [x = 0] [goto end] else [goto sub1x]]

(7+) (run-minim "minim add.txt")
error: type error

Mark
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a0726a$0$1621$ed2619ec@ptn-nntp-reader02.plus.net>
Mark Tarver wrote:
> Hack my Minim program and put in an error (missing then)
> 
> [if [x = 0] [goto end] else [goto sub1x]]
> 
> (7+) (run-minim "minim add.txt")
> error: type error

Perhaps I am mistaken, but this is not the grammar that you described
because the text file you're loading must be translated into Qi notation by
hand (by adding [ ] etc.)?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Mark Tarver
Subject: statically typed languages vs the Lisp/Qi approach
Date: 
Message-ID: <1184930316.045355.132330@22g2000hsm.googlegroups.com>
On 20 Jul, 09:20, Jon Harrop <····@ffconsultancy.com> wrote:
> Mark Tarver wrote:
> > Hack my Minim program and put in an error (missing then)
>
> > [if [x = 0] [goto end] else [goto sub1x]]
>
> > (7+) (run-minim "minim add.txt")
> > error: type error
>
> Perhaps I am mistaken, but this is not the grammar that you described
> because the text file you're loading must be translated into Qi notation by
> hand (by adding [ ] etc.)?
>
> --
> Dr Jon D Harrop, Flying Frog Consultancy
> OCaml for Scientistshttp://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet

If you prefer round brackets, then load it through Lisp.

(run '(etc etc....))

or if you hate the (run '( bit just type in Qi

(1-) (run (read-file "minim program.txt"))

with this in the file

       (print "Add x and y")
       nl
       (print "Input x: ")
       (input x)
       nl
       (print "Input y: ")
       (input y)
       main
       (if (x = 0) then (goto end) else (goto sub1x))

       sub1x
       (-- x)
       (++ y)
       (goto main)

       end
       nl
       (print "The total of x and y is ")
       (print y)
       nl

I probably need to add a typed version of read-file; would be useful.
But that works.

The problem here I think is the OCaml technology.  A Minim program is
just a list of a particular structure.  But languages like ML and its
relatives do not operate kindly with free form lists as does Lisp.  In
this event ML expects you to define constructor functions and if the
original source is not in that form then you need to lex and parse it
into your internal representation.

Qi does not need to do that with Minim because the Qi approach to
typing is designed to be consistent with the Lisp approach to
programming.  No Qi/Lisp programmer will write a Minim lexer that is
effectively nothing more than an identity function w.r.t. the input,
and parsing is not needed in Qi because if the syntax is wrong the Qi
type checker will tell him.

Your adopted religion requires you to operate in this way; like having
to eat cold boiled spinach on Sunday.  But I have no appetite for
eating cold spinach; this is why I left languages like ML, OCaml and
Haskell behind a long time ago.

Mark
From: Matthias Blume
Subject: Re: statically typed languages vs the Lisp/Qi approach
Date: 
Message-ID: <m2abtrdt9a.fsf@hanabi.local.i-did-not-set--mail-host-address--so-tickle-me>
Mark Tarver <··········@ukonline.co.uk> writes:

> Qi does not need to do that with Minim because the Qi approach to
> typing is designed to be consistent with the Lisp approach to
> programming.  No Qi/Lisp programmer will write a Minim lexer that is
> effectively nothing more than an identity function w.r.t. the input,
> and parsing is not needed in Qi because if the syntax is wrong the Qi
> type checker will tell him.

I think you are being unfair here.  Clearly, you designed the language
specifically so that it can be parsed trivially using Lisp.
(Well, at least as long as you allow an extra pair of parentheses
around your program -- something that the original grammar you posted
did not.)
And, strangely, you don't even stick to that syntax but use a
different one in your Qi implementation...

> Your adopted religion requires you to operate in this way; like having
> to eat cold boiled spinach on Sunday.  But I have no appetite for
> eating cold spinach; this is why I left languages like ML, OCaml and
> Haskell behind a long time ago.

Well, Jon may come across as a bit nutty at times, but at least he
restricts his spinach-eating to Sundays.

IMNSHO, to be on level playing ground, the syntax of the language to
be interpreted should not blatantly favor one of the contestants.
Or would you agree to a syntax that happens to match, say, Haskell
data syntax?  Haskell programmers would have a huge advantage, since
they would just declare the data types for the abstract syntax, add
"deriving Read", and be done with the parser...

Cheers,
Matthias
From: Jon Harrop
Subject: Re: statically typed languages vs the Lisp/Qi approach
Date: 
Message-ID: <46a0d084$0$1623$ed2619ec@ptn-nntp-reader02.plus.net>
Matthias Blume wrote:
> Mark Tarver <··········@ukonline.co.uk> writes:
>> Qi does not need to do that with Minim because the Qi approach to
>> typing is designed to be consistent with the Lisp approach to
>> programming.  No Qi/Lisp programmer will write a Minim lexer that is
>> effectively nothing more than an identity function w.r.t. the input,
>> and parsing is not needed in Qi because if the syntax is wrong the Qi
>> type checker will tell him.
> 
> I think you are being unfair here...

It would be unfair to compare the similar-sized Lisp and OCaml
implementations when only the OCaml implements a lexer and parser. Provided
you strip the lexer and parser from the OCaml, I think it is fair. The
OCaml remains several times faster and is now several times shorter as
well.

I think it is a shame that Mark backtracked from a description of the
grammar only to hard-code the interpreted program in the interpreter. I
would have liked to see a lexer and parser written in Lisp. Now I can only
assume that it is prohibitively difficult to implement such trivial
functionality correctly in Lisp.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andy Freeman
Subject: Re: statically typed languages vs the Lisp/Qi approach
Date: 
Message-ID: <1184951371.316135.4770@m37g2000prh.googlegroups.com>
On Jul 20, 8:01 am, Jon Harrop <····@ffconsultancy.com> wrote:
> Now I can only
> assume that it is prohibitively difficult to implement such trivial
> functionality correctly in Lisp.

Great!

Now go tell other people how OCaml/F# are superior to lisp and leave
those of us who noticed that the above is nothing more than a claim
about your limitations in peace.

-andy
From: Mark Tarver
Subject: Re: statically typed languages vs the Lisp/Qi approach
Date: 
Message-ID: <1184958970.735434.52620@k79g2000hse.googlegroups.com>
On 20 Jul, 16:01, Jon Harrop <····@ffconsultancy.com> wrote:
> Matthias Blume wrote:
> > Mark Tarver <··········@ukonline.co.uk> writes:
> >> Qi does not need to do that with Minim because the Qi approach to
> >> typing is designed to be consistent with the Lisp approach to
> >> programming.  No Qi/Lisp programmer will write a Minim lexer that is
> >> effectively nothing more than an identity function w.r.t. the input,
> >> and parsing is not needed in Qi because if the syntax is wrong the Qi
> >> type checker will tell him.
>
> > I think you are being unfair here...
>
> It would be unfair to compare the similar-sized Lisp and OCaml
> implementations when only the OCaml implements a lexer and parser. Provided
> you strip the lexer and parser from the OCaml, I think it is fair. The
> OCaml remains several times faster and is now several times shorter as
> well.
>
> I think it is a shame that Mark backtracked from a description of the
> grammar only to hard-code the interpreted program in the interpreter. I
> would have liked to see a lexer and parser written in Lisp. Now I can only
> assume that it is prohibitively difficult to implement such trivial
> functionality correctly in Lisp.
>
> --
> Dr Jon D Harrop, Flying Frog Consultancy
> OCaml for Scientistshttp://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet

> I think it is a shame that Mark backtracked from a description of the
> grammar only to hard-code the interpreted program in the interpreter.

Actually it is a doddle in this case.  I didn't attach vast importance
to this issue.   OK

(define load-and-run-minim
   File -> (time (run (read-file File)))

will do the trick.  This runs the following program in a file.

       (print "Add x and y")
       nl
       (print "Input x: ")
       (input x)
       nl
       (print "Input y: ")
       (input y)
       main
       (if (x = 0) then (goto end) else (goto sub1x))

       sub1x
       (-- x)
       (++ y)
       (goto main)

       end
       nl
       (print "The total of x and y is ")
       (print y)
       nl

This runs in untyped mode - I could get a version to run in typed mode
with little effort.

> I would have liked to see a lexer and parser written in Lisp.

I've said before, and I'll repeat it again, that the Qi type checker
is doing the parsing here.  A Lisper might volunteer to write you a
parser, but again I have no motivation or need to do so.  I've even
less motivation to write a lexer that acts as the identity function.

> I can only
> assume that it is prohibitively difficult to implement such trivial
> functionality correctly in Lisp.

You assume wrong, and you assume that people in comp.lang.lisp will
rush to disabuse you.  Read CLTL on reader macros.

> The OCaml remains several times faster and is now several times
> shorter as well.

Um, well you need to get rid of those timings of Minim-written-in-
OCaml/Mathematica which are about as relevant here as the contents of
the Great Pyramid.

I think you had these readings

QUOTE
I get roughly the same performance from OCaml's interpreted bytecode:

..........
real    0m0.583s
user    0m0.569s
sys     0m0.005s
UNQUOTE

That's about the same as I'd expect from an SBCL that didn't blow up in
my face - maybe a little slower. But in the ball park.

QUOTE
However, native-code is over an order of magnitude faster:
................
real    0m0.050s
user    0m0.048s
sys     0m0.001s
UNQUOTE

Obviously the OCaml compiler is very efficient.  But you know perhaps
that the Qi program can be made faster (and shorter to boot)?

Mark
From: Jon Harrop
Subject: Re: statically typed languages vs the Lisp/Qi approach
Date: 
Message-ID: <46a15019$0$1631$ed2619ec@ptn-nntp-reader02.plus.net>
Mark Tarver wrote:
> Actually it is a doddle in this case.  I didn't attach vast importance
> to this issue.   OK
> 
> (define load-and-run-minim
>    File -> (time (run (read-file File)))
> 
> will do the trick.  This runs the following program in a file.
> 
>        (print "Add x and y")
> ...
>        nl
> 
> I've said before, and I'll repeat it again, that the Qi type checker
> is doing the parsing here.

This is the input program according to the task that you set and the grammar
that you provided:

print "Add x and y"
nl
print "Input x: "
input x
nl
print "Input y: "
input y
main
if x = 0 then goto end else goto sub1x
sub1x
-- x
++ y
goto main
end
nl
print "The total of x and y is "
print y
nl

My program handles it, yours requires the user to lex and parse it by hand
into s-exprs themselves.

> Obviously the OCaml compiler is very efficient.  But you know perhaps
> that the Qi program can be made faster (and shorter to boot)?

Wonderful. Please post some code so that we can all test it objectively and
quantitatively.

Are we now removing the grammar, lexer and parser from this benchmark?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Mark Tarver
Subject: Re: statically typed languages vs the Lisp/Qi approach
Date: 
Message-ID: <1185001519.636384.29430@m3g2000hsh.googlegroups.com>
On 21 Jul, 01:06, Jon Harrop <····@ffconsultancy.com> wrote:
> This is the input program according to the task that you set and the grammar
> that you provided:
>
> print "Add x and y"
> nl
> print "Input x: "
> input x
> nl
> print "Input y: "
> input y
> main
> if x = 0 then goto end else goto sub1x
> sub1x
> -- x

Actually it isn't you know.

> Wonderful. Please post some code so that we can all test it objectively and
> quantitatively.

The easiest way to speed the program is to transfer it from CLisp to a
fast Lisp like CMUCL.  A point of optimisation is to change the line.

[[++ Var] | Ss] Program Env
-> (run-loop Ss Program (change-env Var (+ 1 (look-up Var Env)) Env))

to

[[++ Var] | Ss] Program Env
 -> (run-loop Ss Program (fast-change-env Var inc Env))

where fast-change-env traverses the environment once instead of
traversing twice as in the old version.

(define fast-change-env
 {symbol --> symbol --> env --> env}
 Var Cmd [(@p Var Val) | Env] -> [(@p Var (if (= Cmd inc)
                                              (+ 1 Val)
                                              (- Val 1))) | Env]
 Var Cmd [Binding | Env] -> [Binding | (fast-change-env Var Cmd Env)])

Running this under CMUCL gives about 0.25s to run the Minim program.
Much faster than CLisp's 2.1s.  As I said, CLisp is slow.

For me there is no lexer, but you can count parsing into the time
which is here done by the Qi typechecker.  It takes 2689 inferences to
read and typecheck (= parse) the Minim program into Qi and CLisp reads
and typechecks it from file at about 100-150K inferences/sec.

Real time: 4.828125 sec.
Run time: 0.03125 sec.
Space: 289524 Bytes
GC: 1, GC time: 0.0 sec.
loaded : symbol

600 KLIPS under CMU is about right according to my benchmarks, so the
time for this process is really quite small.

You can optimise it further and reduce the processing time to 0.17s
under CMU by dispensing with using an association list as an
environment and using the Lisp/Qi inbuilt assignment.  The result
cannot be typechecked w.r.t. the type theory I supplied.

Actually this is not uncommon - often you can find shortcuts in a
program that work but are not demonstrably w.r.t. the type checker
type secure.  There is a difference between what we can see to be true
and what can be formally demonstrated to be true within a given
framework (as Gödel proved 80 years ago in another context).  The
existence of this disparity is one reason why some programmers prefer
to work outside type secure languages because they see them as
preventing possible good solutions.

If you actually compile Minim into Lisp then the figures given have to
be divided by 50 or thereabouts.  The compiler is also very small. As
I said above, you cannot meaningfully compare an OCaml version of the
Minim program to a Qi interpreter.

But staying within the bounds of type security you have this

Execution time under CMU: 0.25s
Read and typecheck (parse): 0.01s

This is twice as fast as OCaml interpreted bytecode, but 5X slower
than OCaml native code according to your benchmarks.

Mark
From: Jon Harrop
Subject: Re: statically typed languages vs the Lisp/Qi approach
Date: 
Message-ID: <46a1b7b4$0$1633$ed2619ec@ptn-nntp-reader02.plus.net>
Mark Tarver wrote:
> ...
> Actually it isn't you know.

How so?

> This is twice as fast as OCaml interpreted bytecode, but 5X slower
> than OCaml native code according to your benchmarks.

I should probably detail the optimization phases that I've done as it sounds
like you're not doing them. They made it 6x faster.

1. Strip tags and replace the tags in gotos with program counters.

2. Replace each variable name with a reference to an integer.

These (particularly the latter) avoid all association list lookups during
interpreting.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: David Golden
Subject: Re: statically typed languages vs the Lisp/Qi approach
Date: 
Message-ID: <pNooi.20987$j7.378808@news.indigo.ie>
Jon Harrop wrote:

> Mark Tarver wrote:
>> ...
>> Actually it isn't you know.
> 
> How so?
> 

Well, the BNF had left and right round brackets embedded in it...

It says 
<conditional> := (if <test> then <statement> else <statement>);
not
<conditional> := if <test> then <statement> else <statement>;

They're round rather than square brackets, of course, but hey.
From: Mark Tarver
Subject: Re: statically typed languages vs the Lisp/Qi approach
Date: 
Message-ID: <1184955196.171297.248870@n60g2000hse.googlegroups.com>
On 20 Jul, 15:50, Matthias Blume <·····@hanabi.local> wrote:
> Mark Tarver <··········@ukonline.co.uk> writes:
> > Qi does not need to do that with Minim because the Qi approach to
> > typing is designed to be consistent with the Lisp approach to
> > programming.  No Qi/Lisp programmer will write a Minim lexer that is
> > effectively nothing more than an identity function w.r.t. the input,
> > and parsing is not needed in Qi because if the syntax is wrong the Qi
> > type checker will tell him.
>
> I think you are being unfair here.  Clearly, you designed the language
> specifically so that it can be parsed trivially using Lisp.
> (Well, at least as long as you allow an extra pair of parentheses
> around your program -- something that the original grammar you posted
> did not.)
> And, strangely, you don't even stick to that syntax but use a
> different one in your Qi implementation...
>
> > Your adopted religion requires you to operate in this way; like having
> > to eat cold boiled spinach on Sunday.  But I have no appetite for
> > eating cold spinach; this is why I left languages like ML, OCaml and
> > Haskell behind a long time ago.
>
> Well, Jon may come across as a bit nutty at times, but at least he
> restricts his spinach-eating to Sundays.
>
> IMNSHO, to be on level playing ground, the syntax of the language to
> be interpreted should not blatantly favor one of the contestants.
> Or would you agree to a syntax that happens to match, say, Haskell
> data syntax?  Haskell programmers would have a huge advantage, since
> they would just declare the data types for the abstract syntax, add
> "deriving Read", and be done with the parser...
>
> Cheers,
> Matthias

Ah, that's quite legitimate, but not the point Jon is making.  Jon is
saying 'Where's your lexer?' and I'm saying 'Actually Qi doesn't need
one here for this example' - the lexer is the identity function.
Properly
then we can level the field by taking out his lexer
from LOC and the timings and that would be fine by me.
More sensible than requiring me to write a lexer that does nothing -
that's very dull :(.  And the parsing is done by the Qi type checker.

The rest of his complaint about needing []s and run is easily solved
(see my reply to him).

Mark
From: Jon Harrop
Subject: Re: statically typed languages vs the Lisp/Qi approach
Date: 
Message-ID: <46a0cebb$0$1610$ed2619ec@ptn-nntp-reader02.plus.net>
Mark Tarver wrote:
> The problem here I think is the OCaml technology...

Allow me to cripple the OCaml, making it as featureless as the Lisp:

module Bindings = Map.Make(String)

let set m x y = Bindings.add x y m
let get m x = Bindings.find x m

let tags_of program =
  let aux (pc, tags) = function
    | `Tag t -> pc+1, Bindings.add t pc tags
    | _ -> pc+1, tags in
  let _, tags = Array.fold_left aux (0, Bindings.empty) program in
  tags

let eval vars = function
  | `Lit n -> n
  | `Var v -> get vars v

let rec test vars = function
  | `Less(f, g) -> eval vars f < eval vars g
  | `Equal(f, g) -> eval vars f = eval vars g
  | `Greater(f, g) -> eval vars f > eval vars g
  | `And(f, g) -> test vars f && test vars g
  | `Or(f, g) -> test vars f || test vars g
  | `Not f -> not(test vars f)

let rec statement tags vars pc = function
  | `Assign(x, y) -> set vars x (eval vars y), pc + 1
  | `Incr x -> set vars x (get vars x + 1), pc + 1
  | `Decr x -> set vars x (get vars x - 1), pc + 1
  | `If(p, t, f) -> statement tags vars pc (if test vars p then t else f)
  | `Goto tag -> vars, Bindings.find tag tags
  | `Tag _ -> vars, pc + 1
  | `PrintString s -> print_string s; vars, pc + 1
  | `Print x -> print_int(get vars x); vars, pc + 1
  | `Input x -> set vars x (int_of_string(input_line stdin)), pc + 1

let rec run program tags (vars, pc) =
  run program tags (statement tags vars pc program.(pc))

let () =
  run
    [|PrintString "Add x and y"; PrintString "\n"; PrintString "Input x: ";
      Input "x"; PrintString "\n"; PrintString "Input y: "; Input "y";
      Tag "main"; If (Equal (Var "x", Lit 0), Goto "end", Goto "sub1x");
      Tag "sub1x"; Decr "x"; Incr "y"; Goto "main"; Tag "end";
      PrintString "\n"; PrintString "The total of x and y is "; Print "y";
      PrintString "\n"|]
    (tags_of program) (Bindings.empty, 0)

> Your adopted religion requires you to operate in this way...

As you can see, the OCaml operates perfectly well both ways. The Lisp,
however, does not.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Matthias Blume
Subject: Re: statically typed languages vs the Lisp/Qi approach
Date: 
Message-ID: <m2644eecp1.fsf@hanabi.local.i-did-not-set--mail-host-address--so-tickle-me>
Jon Harrop <···@ffconsultancy.com> writes:

> Mark Tarver wrote:
>> The problem here I think is the OCaml technology...
>
> Allow me to cripple the OCaml, making it as featureless as the Lisp:
>
> module Bindings = Map.Make(String)
>
> let set m x y = Bindings.add x y m
> let get m x = Bindings.find x m
>
> let tags_of program =
>   let aux (pc, tags) = function
>     | `Tag t -> pc+1, Bindings.add t pc tags
>     | _ -> pc+1, tags in
>   let _, tags = Array.fold_left aux (0, Bindings.empty) program in
>   tags
>
> let eval vars = function
>   | `Lit n -> n
>   | `Var v -> get vars v
>
> let rec test vars = function
>   | `Less(f, g) -> eval vars f < eval vars g
>   | `Equal(f, g) -> eval vars f = eval vars g
>   | `Greater(f, g) -> eval vars f > eval vars g
>   | `And(f, g) -> test vars f && test vars g
>   | `Or(f, g) -> test vars f || test vars g
>   | `Not f -> not(test vars f)
>
> let rec statement tags vars pc = function
>   | `Assign(x, y) -> set vars x (eval vars y), pc + 1
>   | `Incr x -> set vars x (get vars x + 1), pc + 1
>   | `Decr x -> set vars x (get vars x - 1), pc + 1
>   | `If(p, t, f) -> statement tags vars pc (if test vars p then t else f)
>   | `Goto tag -> vars, Bindings.find tag tags
>   | `Tag _ -> vars, pc + 1
>   | `PrintString s -> print_string s; vars, pc + 1
>   | `Print x -> print_int(get vars x); vars, pc + 1
>   | `Input x -> set vars x (int_of_string(input_line stdin)), pc + 1
>
> let rec run program tags (vars, pc) =
>   run program tags (statement tags vars pc program.(pc))
>
> let () =
>   run
>     [|PrintString "Add x and y"; PrintString "\n"; PrintString "Input x: ";
>       Input "x"; PrintString "\n"; PrintString "Input y: "; Input "y";
>       Tag "main"; If (Equal (Var "x", Lit 0), Goto "end", Goto "sub1x");
>       Tag "sub1x"; Decr "x"; Incr "y"; Goto "main"; Tag "end";
>       PrintString "\n"; PrintString "The total of x and y is "; Print "y";
>       PrintString "\n"|]
>     (tags_of program) (Bindings.empty, 0)
>

Did you actually try to compile this code?  It does not look right.
(Unbound variable "program").
From: Jon Harrop
Subject: Re: statically typed languages vs the Lisp/Qi approach
Date: 
Message-ID: <46a1b501$0$1633$ed2619ec@ptn-nntp-reader02.plus.net>
Matthias Blume wrote:
> Did you actually try to compile this code?  It does not look right.
> (Unbound variable "program").

No, I was gedankencoding. :-)

Try this:

let () =
  let program =
    [|PrintString "Add x and y"; PrintString "\n"; PrintString "Input x: ";
      Input "x"; PrintString "\n"; PrintString "Input y: "; Input "y";
      Tag "main"; If (Equal (Var "x", Lit 0), Goto "end", Goto "sub1x");
      Tag "sub1x"; Decr "x"; Incr "y"; Goto "main"; Tag "end";
      PrintString "\n"; PrintString "The total of x and y is "; Print "y";
      PrintString "\n"|] in
  run program (tags_of program) (Bindings.empty, 0)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <469da92c$0$1624$ed2619ec@ptn-nntp-reader02.plus.net>
Mark Tarver wrote:
> Minim is a very basic language - fairly close to assembly.

This is ok but I think minim is a little too simple.

> Here's the stock program to add two numbers together in Minim -
> designed to here run under Qi. You should be able to follow it.
> 
> [      [print "Add x and y"]
>        nl
>        [print "Input x: "]
>        [input x]
>        nl
>        [print "Input y: "]
>        [input y]
>        main
>        [if [x = 0] then [goto end] else [goto sub1x]]
> 
>        sub1x
>        [-- x]
>        [++ y]
>        [goto main]
> 
>        end
>        nl
>        [print "The total of x and y is "]
>        [print y]
>        nl]

Can you write a parser so the program can be loaded from a text file written
in the syntax you described? Unfair to hard code it...

Here's my expr.ml:

type 'var value =
  | Int of int
  | Var of 'var

type 'var test =
  | Less of 'var value * 'var value
  | Equal of 'var value * 'var value
  | Greater of 'var value * 'var value
  | And of 'var test * 'var test
  | Or of 'var test * 'var test
  | Not of 'var test

type ('var, 'tag) statement =
  | Assign of 'var * 'var value
  | Incr of 'var
  | Decr of 'var
  | If of 'var test * ('var, 'tag) statement * ('var, 'tag) statement
  | Goto of 'tag
  | Tag of 'tag
  | PrintString of string
  | Print of 'var
  | Input of 'var

type program = (string, string) statement list

> A Qi Solution
> _____________
> 
> Here's a type secure implementation of an interpreter for Minim in Qi.
> The type theory encapsulates the BNF and is 54 lines of sequent
> calculus.

Can you give an example of errors that your static type system catches?

Here's my lexer.mll:

{
open Parser
open Expr

let start = ref 1 and line = ref 1

let newline lexbuf =
  start := lexbuf.Lexing.lex_curr_p.Lexing.pos_cnum;
  incr line

let ident = function
  | "is" -> IS
  | "if" -> IF
  | "then" -> THEN
  | "else" -> ELSE
  | "goto" -> GOTO
  | "print" -> PRINT
  | "nl" -> NL
  | "input" -> INPUT
  | "and" -> AND
  | "or" -> OR
  | "not" -> NOT
  | s -> IDENT s
}

let digit = ['0'-'9']
let alpha = ['a'-'z' 'A'-'Z']+
let ident = alpha+ (alpha | digit)*

rule token = parse
  | '\n'            { newline lexbuf; token lexbuf }
  | [' ' '\t' '\r'] { token lexbuf }
  | '='             { EQUAL }
  | "++"            { INC }
  | "--"            { DEC }
  | '<'             { LESS }
  | '='             { EQUAL }
  | '>'             { GREATER }
  | digit+ as s     { INT (int_of_string s) }
  | ident as s      { ident s }
  | '"' (("\\\"" | [^ '"'])* as s) '"' { STRING s }
  | eof             { EOF }

Here's my parser.mly:

%{
  open Expr
%}

%token <string> STRING IDENT
%token <int> INT
%token IS IF THEN ELSE GOTO PRINT NL INPUT AND OR NOT EOF INC DEC LESS EQUAL
GREATER

%start program
%type <Expr.program> program

%%

program:
| statement program                       { $1 :: $2 }
| statement EOF                           { [$1] };

statement:
| IDENT IS value                          { Assign($1, $3) }
| INC IDENT                               { Incr $2 }
| DEC IDENT                               { Decr $2 }
| IF test THEN statement ELSE statement   { If($2, $4, $6) }
| GOTO IDENT                              { Goto $2 }
| IDENT                                   { Tag $1 }
| PRINT STRING                            { PrintString $2 }
| PRINT IDENT                             { Print $2 }
| NL                                      { PrintString "\n" }
| INPUT IDENT                             { Input $2 };

value:
| INT                                     { Int $1 }
| IDENT                                   { Var $1 };

test:
| value LESS value    { Less($1, $3) }
| value EQUAL value   { Equal($1, $3) }
| value GREATER value { Greater($1, $3) }
| test AND test       { And($1, $3) }
| test OR test        { Or($1, $3) }
| NOT test            { Not $2 };

> \The program that runs Minim programs is 56 lines of Qi and is given
> here.\

Here's my 43-line eval.ml:

open Expr
open Printf

module Bindings = Map.Make(String)

let set m x y = Bindings.add x y m

let get m x = Bindings.find x m

let tags_of program =
  let aux (pc, tags) = function
    | Tag t -> pc+1, Bindings.add t pc tags
    | _ -> pc+1, tags in
  let _, tags = Array.fold_left aux (0, Bindings.empty) program in
  tags

let eval vars = function
  | Int n -> n
  | Var v -> get vars v

let rec test vars = function
  | Less(f, g) -> eval vars f < eval vars g
  | Equal(f, g) -> eval vars f = eval vars g
  | Greater(f, g) -> eval vars f > eval vars g
  | And(f, g) -> test vars f && test vars g
  | Or(f, g) -> test vars f || test vars g
  | Not f -> not(test vars f)

let rec statement tags vars pc = function
  | Assign(x, y) -> set vars x (eval vars y), pc + 1
  | Incr x -> set vars x (get vars x + 1), pc + 1
  | Decr x -> set vars x (get vars x - 1), pc + 1
  | If(p, t, f) -> statement tags vars pc (if test vars p then t else f)
  | Goto tag -> vars, Bindings.find tag tags
  | Tag _ -> vars, pc + 1
  | PrintString s -> print_string s; vars, pc + 1
  | Print x -> print_int(get vars x); vars, pc + 1
  | Input x -> set vars x (int_of_string(input_line stdin)), pc + 1

let rec run program tags (vars, pc) =
  run program tags (statement tags vars pc program.(pc))

let () =
  match Sys.argv with
  | [|_; file|] ->
      let ch = open_in file in
      let program = Parser.program Lexer.token (Lexing.from_channel ch) in
      close_in ch;
      let program = Array.of_list program in
      (try run program (tags_of program) (Bindings.empty, 0) with _ -> ())
  | _ -> invalid_arg "Usage: ./minim <file>"

> NB: This is run under CLisp which is *much* slower than SBCL.  My
> version of SBCL (1.0) for Windows is rather neurotic and I've had to
> choose the slower but more stable CLisp.  This means I've probably
> lost out by a factor of 4 (at a guess).
> ...
> The total of x and y is 200000
> 
> Real time: 12.15625 sec.
> Run time: 2.125 sec.

I get roughly the same performance from OCaml's interpreted bytecode:

$ ocamlbuild eval.byte
+ /usr/bin/ocamlyacc parser.mly
6 shift/reduce conflicts.
Finished, 13 targets (0 cached) in 00:00:03.
$ time ./eval.byte test.minim <args.txt
Add x and y
Input x:
Input y:
The total of x and y is 200000

real    0m0.583s
user    0m0.569s
sys     0m0.005s

However, native-code is over an order of magnitude faster:

$ ocamlbuild eval.native
Finished, 16 targets (11 cached) in 00:00:03.
$ time ./eval.native test.minim <args.txt
Add x and y
Input x:
Input y:
The total of x and y is 200000

real    0m0.050s
user    0m0.048s
sys     0m0.001s

To optimize this, I would precompute branch targets and variable space,
substituting the gotos and variable references with integers instead of
strings. I deliberately parameterized the expr type over the types of
variables and tags to make this easy.

> This whole post is a commented Qi program so you can load it into Qi.

Now that is cool. :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Mark Tarver
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1184851141.072101.234630@x40g2000prg.googlegroups.com>
OK; I'm reading this group on one news reader which works but does not
let me post and replying on Google which lets me reply (sometimes) but
does not let me read my own post.  Rather weird - like watching a film
in which the action is out of sync with the sound.  I'm going to
suppose that this ends up where it should.

> This is ok but I think minim is a little too simple.

Well it's a post, so I wanted it not to be too long.

> Can you write a parser so the program can be loaded from a text file written
> in the syntax you described? Unfair to hard code it...

Not needed - just place (time (run ....)) into a text file and load it
with type checking enabled.
The type checker will parse the input to ensure it conforms to the
requirements.

> Can you give an example of errors that your static type system catches?

Any syntax error in a Minim program; for example missing a 'then' in
an if-statement.
Any error in my interpreter that comes from getting Minim syntax wrong
or getting
confused over my data structures - e.g. trying to find the value of a
constant.

Mark
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a1bfaa$0$1590$ed2619ec@ptn-nntp-reader02.plus.net>
Mark Tarver wrote:
> > Can you give an example of errors that your static type system catches?
> 
> Any syntax error in a Minim program; for example missing a 'then' in
> an if-statement.

With the program hard-coded, such errors will be caught by OCaml's static
type system. When the program is lexed and parsed properly, the parser
would have caught that error.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Ole Nielsby
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <469e7866$0$98084$edfadb0f@dread14.news.tele.dk>
!!
This post is a commented PILS program.
It compiles the minix stock program to a set of PILS rules and executes
it by repeatedly applying the ruleset to a state node that holds the 
variables.

The style is what I call narrative programming - I start with
the minim program in a text literal, then describe what to do with it.

Parsing is done with a cheat - using 9 code lines.
The compiler as such approx. 45 code lines. I tried to cover all minix.
The "minix runtime" is 30-odd lines, mostly because PILS doesn't have
a console interface - the text input/output is done via wxWidgets dialogs.

Execution time for 100000 + 100000 is approx. 1.7 seconds on a
2GHz dual core2, using the VC2005 release build of the PILS system.
(Actually, I tested with 1000000 + 1000000 and got 17 seconds.)
I made the compiler as simple as possible - a few simple optimisations
would make it somewhat faster.
!!

{.test
|:ok

!!
Here goes the minix stock program.
String delimiters are doubled, by PILS convention
!!

"
(print ""Add x and y (what a feat!)"")
nl
(print ""Input x: "")
(input x)
nl
(print ""Input y: "")
(input y)
main
(if (x = 0) then (goto end) else (goto sub1x))

sub1x
(-- x)
(++ y)
(goto main)

end
nl
(print ""The total of x and y is "")
(print y)
nl
"

!!
To parse it using the PILS constant syntax,
 replace () with [list: ] outside strings.
!!

  split """"
  split 2
  each
  ( { a, b |:ok process (a), b }
    { a, | :ok process (a) }
    where
    { process: string | :ok string replace ["(" "[list: " ")" "]"] } )
  splice splice """"

!!
enclose in [] and read it, using the PILS parser
!!

  call {s|:ok ?:? read ("[" (s) "]") }

!!
compile to tagged actions
!!

  listwise counting
  { number := statement
  | list [actions] :=
    ?tag number
    .action
    ?next (' ?state) head (tag: number + 1);
    statement try
    { / tag | :ok list [actions] := (?tag .action next); next }
    { .nl | :ok ::- ' print ""/; next }
    { list: [print], / v | :ok ::- (: print (: ' state . v)); next }
    { list: [print], $ string | :ok ::- (: print (string)); next }
    { list: [input], / v | :ok v := ' input }
    { list: [goto], / v | :ok ' (?state) head: tag: v }
    { list: [++], / v | :ok v := : val (v) + 1 }
    { list: [--], / v | :ok v := : val (v) - 1 }
    { list: / a, [is], b | :ok a := val (b) }
    { list: [if], if, [then], then, [else], else
    | :self :ok
      ::if . try
      { list: a, [=], b | :ok : val (a) = val (b) }
      { list: a, [<], b | :ok : val (a) < val (b) }
      { list: a, [>], b | :ok : val (a) > val (b) }
      { list: a, [and], b | :self :ok ::if a try (self); b try (self) }
      { list: a, [or], b | :self :ok ::if a try (self); 1 .else b try 
(self) }
      { list: [not], a | :self :ok : a <> 1 }
      ; then try (self)
      .else . try (self)
    }
    where
    ! This rule implements assignment by constructing a new state
    { / v := e
    | :where :ok
      next merge: ?state
      : (' state) merge: (node [?] (where . v) := e) node [?]
    }
    ! Constant vals are as-is, variables must be fetched from the state
    { .val (= e) | :ok e }
    { .val (/ v) | :ok : ' state . v }
  }
  list [actions]
  ! tag/action pairs are wrapped
  every
  {?tag .action
  | :ok ?match (' ?state) head (tag: tag) .action (::ok action)
  }

  first [?]
  fold {rules := ?match .action|:ok ::match .action ; rules}
  call {rules | :ok ::ruleset rules}

!!
This produces the following PILS rules:
{[tag: 1]: .state|:ok :- print "Add x and y (what a feat!)"; [tag: 2]: 
.state}
{[tag: 2]: .state|:ok :- print ""/; [tag: 3]: .state}
{[tag: 3]: .state|:ok :- print "Input x: "; [tag: 4]: .state}
{[tag: 4]: .state|:ok [tag: 5]: .state . merge (?x input)}
{[tag: 5]: .state|:ok :- print ""/; [tag: 6]: .state}
{[tag: 6]: .state|:ok :- print "Input y: "; [tag: 7]: .state}
{[tag: 7]: .state|:ok [tag: 8]: .state . merge (?y input)}
{[tag: main]: .state|:ok [tag: 9]: .state}
{[tag: 8]: .state|:ok [tag: 9]: .state}
{[tag: 9]: .state|:ok :else ([tag: sub1x]: .state) .if state v = 0; [tag: 
end]: .state}
{[tag: sub1x]: .state|:ok [tag: 11]: .state}
{[tag: 10]: .state|:ok [tag: 11]: .state}
{[tag: 11]: .state|:ok [tag: 12]: .state . merge (?x state v - 1)}
{[tag: 12]: .state|:ok [tag: 13]: .state . merge (?y state v + 1)}
{[tag: 13]: .state|:ok [tag: main]: .state}
{[tag: end]: .state|:ok [tag: 15]: .state}
{[tag: 14]: .state|:ok [tag: 15]: .state}
{[tag: 15]: .state|:ok :- print ""/; [tag: 16]: .state}
{[tag: 16]: .state|:ok :- print "The total of x and y is "; [tag: 17]: 
.state}
{[tag: 17]: .state|:ok :- print (state v); [tag: 18]: .state}
{[tag: 18]: .state|:ok :- print ""/; [tag: 19]: .state}

Now run it, using the PILS editor window as base for dialogs
(wxWidgets PILS doesn't allow orphan dialogs, they mess up the event 
handling)
!!

  call
  { minix-rules
  | :rule [runner];
    ?window [channel: editor] window;
    ( node [ps] print := "";
      ?endstate
      ([tag: 1]: .state ?:) repeat
      ( minix-rules ---
        { :nonsense | :what rule [runner] :ok what }
        { .print ($ string) | :ok node [ps] print := node [ps] print . 
string }
        { .print ""/
        | :ok
          :if node [ps] print => +$ message;
          :- window wx:MessageBox (message);
          node [ps] print := ""
        }
        { .print (% n)|:self :try self print (?:? write (n)) }
        { .input
        | :if
            window wx:GetTextFromUser (node [ps] print, "minix-input") => +$ 
text,
            ?:? read (text) => % text
          ;
          node [ps] print := "";
          :ok text
        }
! for debugging, a rule can be inserted here.
!        but {state|state bug (?:?)}
      )
      ;
      :ok endstate
    )
    node [ps]
  }
}

!!
This should be a builtin, haven't implemented it yet.
    list split 2   splits a list in pairs, etc.
!!
where
{ & list split (+ splitsize)
| :ok
  list count repeat
  { + count | :ok list() := list <+# count ++# splitsize; count - 
splitsize }
  list()
}
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <469f42c1$0$1615$ed2619ec@ptn-nntp-reader02.plus.net>
Ole Nielsby wrote:
> Parsing is done with a cheat - using 9 code lines.
> The compiler as such approx. 45 code lines. 

The short interpreter is 63 LOC in OCaml or 133 LOC including the full lexer
and yacc-based parser.

> Execution time for 100000 + 100000 is approx. 1.7 seconds on a
> 2GHz dual core2, using the VC2005 release build of the PILS system.

Not meaning to be rude, but why are the Qi and PILS implementations so slow?

This is just looping and incrementing 100,000 times and you guys are getting
times ~1s? That means you're doing O(10,000) machine operations per loop of
the minim code, which is just crazy.

Mathematica is the slowest language that I have access to and even it only
takes 0.27s to complete this problem:

$ ledit MathKernel
Mathematica 5.1 for Linux x86 (64 bit)
Copyright 1988-2004 Wolfram Research, Inc.
 -- Motif graphics initialized --

In[1]:= x=100000; y=100000;

In[2]:= Timing[While[x>0, --x; ++y]; y]

Out[2]= {0.26896 Second, 200000}

> (Actually, I tested with 1000000 + 1000000 and got 17 seconds.)

That is ~200x slower than the OCaml, which takes only 0.08s:

$ time ./eval.native test.minim <args.txt
Add x and y
Input x: 1000000
Input y: 1000000
The total of x and y is 2000000

real    0m0.080s
user    0m0.078s
sys     0m0.002s

Rewriting the test Minim program in OCaml:

let x = ref 1000000
let y = ref 1000000

let () =
  while !x>0 do
    decr x;
    incr y;
  done;
  Printf.printf "y=%d\n%!" !y

and running it using OCaml's bytecode interpreter takes only 0.004s so it is
20x faster than my naive term-level interpreter, which sounds about right.
I can't imagine what you're doing to make it run another 200 times slower
though...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Ole Nielsby
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <469fc381$0$7245$edfadb0f@dread14.news.tele.dk>
Jon Harrop <···@ffconsultancy.com> wrote:

> Ole Nielsby wrote:
>> Parsing is done with a cheat - using 9 code lines.
>> The compiler as such approx. 45 code lines.
>
> The short interpreter is 63 LOC in OCaml or 133 LOC including the full 
> lexer
> and yacc-based parser.

The Kvernbitr parser generator (written in PILS) can parse yacc-unfriendly
languages like VB6 and SQL, using a BNF-like syntax. I haven't yet ported
it to the new PILS dialect which I am going to publish soon.

>> Execution time for 100000 + 100000 is approx. 1.7 seconds on a
>> 2GHz dual core2, using the VC2005 release build of the PILS system.
>
> Not meaning to be rude, but why are the Qi and PILS implementations so 
> slow?
> Mathematica is the slowest language that I have access to and even it only
> takes 0.27s to complete this problem

Whereas PILS takes 0.35s using a direct approach (comparable to the
Mathematica snippet you posted):

  (?x 100000 .y 100000)
  repeat {: ?x . [+] .y|:ok ?x . - 1 .y . + 1}
  y

So it's about the same speed as Mathematica - assuming similar CPUs.

The slowness is mostly due to boxing and unifying of numbers. This
makes PILS unfit for serious number crunching, whereas processing of
texts and node trees is quite fast. So the speed depends on what you
use it for. The unified number boxing may be bad for numeric calculations
but it is part of a strategy that makes pattern matching very fast. So it's
a tradeoff by design.

There is still room for improvement though. There are things I could
do to the PILS interpreter to bypass boxing in cases like this - it's just
not a priority now, the language was never meant for number crunching.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <469fc6c4$0$1630$ed2619ec@ptn-nntp-reader02.plus.net>
Ole Nielsby wrote:
> Jon Harrop <···@ffconsultancy.com> wrote:
>> Ole Nielsby wrote:
>> The short interpreter is 63 LOC in OCaml or 133 LOC including the full
>> lexer
>> and yacc-based parser.
> 
> The Kvernbitr parser generator (written in PILS) can parse yacc-unfriendly
> languages like VB6 and SQL, using a BNF-like syntax. I haven't yet ported
> it to the new PILS dialect which I am going to publish soon.

Yes. I tried writing the parser in camlp4 and using streams first but never
got them to work. I'm going to have another hack at a stream-based parser
as it will be much shorter.

>>> Execution time for 100000 + 100000 is approx. 1.7 seconds on a
>>> 2GHz dual core2, using the VC2005 release build of the PILS system.
>>
>> Not meaning to be rude, but why are the Qi and PILS implementations so
>> slow?
>> Mathematica is the slowest language that I have access to and even it
>> only takes 0.27s to complete this problem
> 
> Whereas PILS takes 0.35s using a direct approach (comparable to the
> Mathematica snippet you posted):
> 
>   (?x 100000 .y 100000)
>   repeat {: ?x . [+] .y|:ok ?x . - 1 .y . + 1}
>   y
> 
> So it's about the same speed as Mathematica - assuming similar CPUs.

Argh, I see. You were both running interpreters in interpreted languages. I
should compile the OCaml to interpreted bytecode rather than native code
for a fairer comparison then. In which case I get (neglecting machine
differences):

CLisp: 0.86s
PILS: 0.35s
OCaml: 0.11s

That's much more in line with what I'd expect.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <469f33b8$0$1613$ed2619ec@ptn-nntp-reader02.plus.net>
Mark Tarver wrote:
> This whole post is a commented Qi program so you can load it into Qi.

How do I get a working Qi environment? I can't find a Debian package. Is
there a Lisp equivalent of CPAN or GODI that makes it easy to fetch and
install such things?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Mark Tarver
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1184881804.279147.61540@o61g2000hsh.googlegroups.com>
On 19 Jul, 10:40, Jon Harrop <····@ffconsultancy.com> wrote:
> Mark Tarver wrote:
> > This whole post is a commented Qi program so you can load it into Qi.
>
> How do I get a working Qi environment? I can't find a Debian package. Is
> there a Lisp equivalent of CPAN or GODI that makes it easy to fetch and
> install such things?
>
> --
> Dr Jon D Harrop, Flying Frog Consultancy
> OCaml for Scientistshttp://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet

When Qi first came out it ran only under Windows CLisp and so I used
to
distribute executables.  Since then Qi has been ported to Allegro,
SBCL and
CMUCL and runs under Xnix and Windows.  So I stopped issuing
executables
because the combinations of platforms and OS were too many.  The
download from
Lambda Associates assumes that you've already got one of these Lisps
and
takes it from there.

J. T. Gleason runs a Google open source repository for Qi and he has
written
an installation package for Qi - see http://code.google.com/p/qilang/

The fastest platform is (I think) CMUCL though I haven't timed it
against
my beta release Windows SBCL.

Mark
From: MetaProgrammer
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1184931045.729686.117540@w3g2000hsg.googlegroups.com>
On Jul 18, 1:24 am, Mark Tarver <··········@ukonline.co.uk> wrote:
> \Jon suggested that it would be good to implement some significant
> programs in different functional languages for comparison.  He
> suggested interpreters for procedural languages like Basic.

 Hi there!

 Here is our solution, using the .NET framework and MBase
(see http://www.meta-alternative.net/techpreview.html)

 The compiled code works even faster than corresponding C# code,
compilation time is reasonably small.

-------
;;; Language abstract syntax tree
(def:ast minim ()
  (*TOP* <program>) ;; entry point, used internally
  (program <*statement:sts>)
  (statement
   (| (Ass <ident:var> <val:val>)
      (++ <ident:var>)
      (-- <ident:var>)
      (Cnd <test:tst> <statement:tr> <statement:fl>)
      (Gto <ident:tag>)
      (Tag <ident:tag>)
      (PrntStr <string:val>)
      (PrntVal <val:val>)
      (PrntNl)
      (Input <ident:v>)
      ))
  (test
   (| (Comp cmp <val:left> <val:right>)
      (And <test:left> <test:right>)
      (Or <test:left> <test:right>)
      (Not <test:t>)
      ))
  (val (| (V <ident:v>) (C num)))
  )

;;; Some .NET stuff
(define _print_mtd (r_mtd "System.Console" "Write" object))
(define _readline_mtd (r_mtd "System.Console" "ReadLine"))
(define _parse_mtd (r_mtd "System.Int32" "Parse" string))

;;; Compiler: compiling minim program into .NET IL
(function minim->cli ( expr )
  (<> expr
   (ast:visit minim program
     (program DEEP ;; flatten the compiled statements list
       (foldl append '() sts))
     (statement DEEP ( ;; compile the statement, deeper ones first
       (Ass
	    `((local ,var ,t_Int32)
	      ,@val
	      (Stloc (var ,var))))
       (++ `((Ldloc (var ,var))
	     ,(_ldc_i4 1)
	     (Add)
	     (Stloc (var ,var))))
       (-- `((Ldloc (var ,var))
	     ,(_ldc_i4 1)
	     (Sub)
	     (Stloc (var ,var))))
       (Cnd
	(with-syms (lend lfl)
	  `(,@tst
	    (Brfalse (label ,lfl))
	    ,@tr
	    (Br (label ,lend))
	    (label ,lfl)
	    ,@fl
	    (label ,lend)
	    )))
       (Gto `((Br (label ,tag))))
       (Tag `((label ,tag)))
       (PrntStr
	    `((Ldstr ,val) (Call ,_print_mtd)))
       (PrntVal
	    `(,@val (Box ,t_Int32)
	      (Call ,_print_mtd)))
       (PrntNl
	 `((Ldstr "\n") (Call ,_print_mtd)))
       (Input
	 `((local ,v ,t_Int32)
           (Call ,_readline_mtd)
	   (Call ,_parse_mtd)
	   (Stloc (var ,v))))))
     (val DEEP ( ;; compile the value lookup
       (V `((Ldloc (var ,v))))
       (C `(,(_ldc_i4 num)))))
     (test DEEP ( ;; compile the test statement
       (Comp
             `(,@left ,@right
	       ,(case cmp ((>) '(Cgt)) ((<) '(Clt)) ((=) '(Ceq)))))
       (And  `(,@left ,@right (And)))
       (Or   `(,@left ,@right (Or)))
       (Not  `(,t (Not)))))
     )))

;;; Being fair: won't reuse s-expressions parser,
;;; implementing a real one, with an advantage of
;;; readable error messages.

;; Simple lexer: splits the stream into tokens
(make-simple-lexer minim-lexer
  (ident-or-keyword
   (p.alpha ((p.alpha | p.digit) *))
    ident)
  (keywords input print nl goto if then else and or not is)
  (simple-tokens
   "[" LB "]" RB
   "(" LB ")" RB
   ">" > "<" < "=" = "++" ++ "--" --)
  (regexp-tokens
   (("\"" (((#\\ #\") | (! #\")) *) "\"") ->
    (·@ list->string cuttail cdr)) string
   (("\'" (((#\\ #\') | (! #\')) *) "\'") ->
    (·@ list->string cuttail cdr)) string
   p.integer.p                     number)
  (ignore p.whitespace))

;; Simple LL(1) parser
(bnf-parser ((programg parse-minim))

  (programg
   ((LB program RB) $1) ;; not quite conforming to formal spec,
                        ;; but required to run the test prog.
   )
  (program
   ((statement program) (cons $0 $1))
   ((statement) (list $0)))

  (statement
   ((LB ident:va is val:vl RB) `(Ass ,va ,vl))
   ((LB ++ ident:va RB) `(++ ,va))
   ((LB -- ident:va RB) `(-- ,va))
   ((LB goto ident:tag  RB) `(Gto ,tag))
   ((LB if test:tst then statement:s1 else statement:s2 RB)
    `(Cnd ,tst ,s1 ,s2))
   ((LB print string:str RB)
    `(PrntStr ,str))
   ((LB print val:v RB)
    `(PrntVal ,v))
   ((nl)
    `(PrntNl))
   ((LB input ident:va RB)
    `(Input ,va))
   ((ident)
    `(Tag ,$0)))

  (val
   ((ident) `(V ,$0))
   ((number) `(C ,$0)))

  (test
   ((LB val:v1 comp:c val:v2 RB) `(Comp ,c ,v1 ,v2))
   ((LB test:l and test:r RB) `(And ,l ,r))
   ((LB test:l or test:r RB) `(Or ,l ,r))
   ((LB not test:t RB) `(Not ,t)))

  (comp
   ((<) '<)
   ((>) '>)
   ((=) '=))
  )

;; Compiler frontend: macro which embeds the compiled IL assembly
;; into the Lisp function and calls that function immediately.
;; In case of an error it prints the message, doing nothing.
(macro include-minim (fname)
  (try
   (let* ((ll (lex-and-parse minim-lexer parse-minim
			     (read-file-list fname)))
	  (lp (minim->cli ll))
	  (nm (gensym)))
     `(begin
	(function ,nm ()
           (n.asm ()
	   ,@lp
	   (Ldnull)
	   ))
	(,nm)
	))
   t_MBaseException
   (fun (e)
     (writeline `(Exception in minim loader: ,(mbaseerror e)))
     'nil)))

;; Now: test it.
(include-minim "test1.min")
From: MetaProgrammer
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1184936734.566717.180050@o61g2000hsh.googlegroups.com>
P.S.: in order to build standalone Minim executables,
change the last macro into the following:

;; Compiler frontend: macro which embeds the compiled IL assembly
;; into the Lisp function and calls that function immediately.
;; In case of an error it prints the message, doing nothing.
(macro include-minim-f (nm fname)
  (try
   (let* ((ll (lex-and-parse minim-lexer parse-minim
			     (read-file-list fname)))
	  (lp (minim->cli ll)))
     `(function ,nm ()
	(n.asm ()
         ,@lp
	 (Ldnull)
	 )
	))
   t_MBaseException
   (fun (e)
     (writeline `(Exception in minim loader: ,(mbaseerror e)))
     'nil)))

(macro include-minim ( fname )
  (with-syms ( nm )
    `(top-begin
       (include-minim-f ,nm ,fname)
       (,nm))))

----

 And compile now the following file (using mbase /compiledll
<filename>):

(n.module mcomp exe)

(include "./minim.al")

;;; MINIM language compiler frontend for standalone executables.

(function main ( )
  (let ((fnm (car (a->l *CMDLINE*))))
    (read-int-eval '(n.module minim exe))
    (read-compile-eval
     `(include-minim-f main ,fnm))
    (read-int-eval '(save-module))
    ))

----

 After that, just do "mcomp ./test1.min", and "minim.exe" to run the
resulting program.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a33977$0$1617$ed2619ec@ptn-nntp-reader02.plus.net>
I finally managed to get a version working using the new camlp4 system to
implement an in-line extensible parser. The result is a 68-line interpreter
that runs more quickly than all other implementations so far:

open Camlp4.Sig;;
open Camlp4.Struct;;

let tags = ref [] and vars = ref [];;
let rec get m k =
  try List.assoc k !m with Not_found -> m := (k, ref 0) :: !m; get m k

let pc = ref 0;;

module Token = Token.Make(Loc);;
module Lexer = Lexer.Make(Token);;
module Gram = Grammar.Static.Make(Lexer);;

let program = Gram.Entry.mk "program";;
let program_aux = Gram.Entry.mk "program_aux";;
let statement = Gram.Entry.mk "statement";;
let value = Gram.Entry.mk "value";;
let test = Gram.Entry.mk "test";;
let comp = Gram.Entry.mk "comp";;

EXTEND Gram
  program:
  [ [ ss=LIST1 program_aux -> Array.of_list ss ] ];
  program_aux:
  [ [ s=statement -> incr pc; s ] ];
  statement:
  [ [ x=LIDENT; "is"; y=value -> `Assign(get vars x, y)
    | "++"; x=LIDENT -> `Incr(get vars x)
    | "--"; x=LIDENT -> `Decr(get vars x) ]
  | [ "if"; p=test; "then"; t=statement; "else"; f=statement -> `If(p, t,
f) ]
  | [ "goto"; t=LIDENT -> `Goto(get tags t) ]
  | [ t=LIDENT; s=statement -> get tags t := !pc; s ]
  | [ "print"; s=STRING -> `PrintString s
    | "print"; v=value -> `Print v
    | "nl" -> `PrintString "\n" ]
  | [ "input"; x=LIDENT -> `Input(get vars x) ] ];
  value:
  [ [ x=LIDENT -> `Var(get vars x)
    | n=INT -> `Int(int_of_string n) ] ];
  test:
  [ [ a=value; op=comp; b=value -> `Comp(op, a, b) ]
  | [ a=test; "and"; b=test -> `And(a, b) ]
  | [ a=test; "or"; b=test -> `Or(a, b) ]
  | [ "not"; a=test -> `Not a ] ];
  comp:
  [ [ "<" -> `Less | "=" -> `Equal | ">" -> `Greater ] ];
END;;

let eval = function `Int n -> (n : int) | `Var v -> !v

let rec test = function
  | `Comp(`Less, f, g) -> eval f < eval g
  | `Comp(`Equal, f, g) -> eval f = eval g
  | `Comp(`Greater, f, g) -> eval f > eval g
  | `And(f, g) -> test f && test g
  | `Or(f, g) -> test f || test g
  | `Not f -> not(test f)

let rec statement pc = function
  | `Assign(x, y) -> x := eval y; pc + 1
  | `Incr x -> incr x; pc + 1
  | `Decr x -> decr x; pc + 1
  | `If(p, t, f) -> statement pc (if test p then t else f)
  | `Goto tag -> !tag
  | `PrintString s -> print_string s; pc + 1
  | `Print x -> print_int(eval x); pc + 1
  | `Input x -> x := int_of_string(input_line stdin); pc + 1

let rec run program pc = run program (statement pc program.(pc))

let () =
  match Sys.argv with
  | [|_; file|] ->
      let ch = open_in file in
      let program = Gram.parse program Loc.ghost (Stream.of_channel ch) in
      close_in ch;
      (try run program 0 with _ -> ())
  | _ -> invalid_arg "Usage: ./minim <file>"

I think it is particularly interesting to note that the parser is shorter,
faster and more extensible than the Qi implementation even though the
target grammar is an s-expr!

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f80j9h$lkc$1@registered.motzarella.org>
Jon Harrop schrieb:
> I finally managed to get a version working using the new camlp4 system to
> implement an in-line extensible parser. The result is a 68-line interpreter
> that runs more quickly than all other implementations so far:
> 
> I think it is particularly interesting to note that the parser is shorter,
> faster and more extensible than the Qi implementation even though the
> target grammar is an s-expr!

The question is what you mean with "shorter".
In principle you can express a problem exactly the same in Lisp as you
could in OCaml (or Haskell).
These languages offer two interesting features: syntactic sugar for a
small set of lambdas (currying) and patter matching. Haskell also offers
lazyness which reduces the need for some macros (and can result in worse
performance).
On top of that there is not really much more that one can find in for
example OCaml. Currying and PM are also available in Lisp. In Lisp there
also is a dynamic environment and macros. These macros can also access
the Lisp environment at run time (well, any Lisp programming always
happens at runtime).
So, from that we know that in principle Lisp programs should be the same
(or shorter), complexity wise.

Counting lines makes not much sense for Lisp. Although it supports all
these programming paradigms it has a very unique style which will blow
up the LOC count in several cases. But from this it doesn't follow, that
coding takes longer.

This one liner:  (defun make-accumulator (n) (lambda (i) (incf n i)))
gets usually written in three visible lines:
(defun make-accumulator (n)
   (lambda (i)
     (incf n i)))

OCaml has a lot of syntax which allows generally to express algorithms
with a smaller number of lines/chars (compared to Lisp).

Or see this Haskell function:
powerset = foldr (\x ys -> ys ++ (map (x:) ys)) [[]]

In Lisp we can do exactly the same one liner. Here it is (also in one
line, in some sense):
(defun powerset (set)
   (reduce (lambda (x ys)
             (append ys (mapcar (lambda (y)
                                  (cons x y))
                                ys)))
           set
           :initial-value '(())
           :from-end t))

Lisp does not offer a separate foldr and foldl. Reduce is doing both.
But in the case of a foldr we need to add  :from-end t
Here is the same Haskell code, indented with Lisp style:
powerset set =
   foldr (\x ys ->
           ++ ys (map (\y
                        x:y)
                      ys))
         [[]]
         set

If Haskell also would use one function for foldr and foldl we also had
to add this line.
So, these two functions do exactly the same. Haskell uses more syntactic
sugar which cuts down the byte count.

When we take this in mind then the Lisp code that was presented took up
much less lines. Maybe around your OCaml solution. But it is very hard
to compare.
But it seems that your OCaml programs execute the task much faster.
Conceptually the Lisp programs don't have to be more complicated.

 From that perspective I think your statement that your implementation
is shorter is not correct.


André
-- 
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f81nk5$lkq$1@online.de>
André Thieme schrieb:
> Counting lines makes not much sense for Lisp. Although it supports all
> these programming paradigms it has a very unique style which will blow
> up the LOC count in several cases. But from this it doesn't follow, that
> coding takes longer.
> 
> This one liner:  (defun make-accumulator (n) (lambda (i) (incf n i)))
> gets usually written in three visible lines:
> (defun make-accumulator (n)
>   (lambda (i)
>     (incf n i)))

There are two answers to that:

1. Coding doesn't take longer, but you can't place the same amount of 
code on a screenful, so debugging and maintenance will take longer.
Note that your typical generic FPL not only fits on a line, it even 
takes less of a line; the syntactic Haskell equivalent of the above 
example would look like this:
   make-accumulator N = incf N
(No, Haskell isn't cheating, it simply doesn't have or need macros and 
quoting, so it can encode the same code with far fewer symbols.)
Now that's 27 instead of 52 characters, which means I can put nearly 
double the code on a single line without cramming it.
(I'd expect OCaml to be slightly more verbose. Jon?)

2. You can always count nodes in the AST instead of lines of code. For 
the above example, you'd end up at roughly the same figures for Lisp and 
your generic FPL, but as soon as you declare macros in Lisp, the FPL 
needs less nodes.
(There may be other effects. Jon?)

Regards,
Jo
From: Matthias Benkard
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185187537.345904.327660@n60g2000hse.googlegroups.com>
Hi,

> the syntactic Haskell equivalent of the above
> example would look like this:
>    make-accumulator N = incf N

Not really.

First of all, INCF is a macro.  How do you curry a macro?  That
doesn't make much sense to me.

Second, INCF takes a place as its first argument, not a value.

Third, INCF takes a variable number of arguments.  How is the compiler
supposed to know whether MAKE-ACCUMULATOR is of type Number a => a -> a
or of type Number a => a?

So yes, claiming that the above pieces of code are syntactically
equivalent _is_ cheating (macros are part of the syntax, after all).
You may argue about the utility of macros, but that's beside the
point, for the fact is, Common Lisp _does_ have macros (and places,
and variable number argument lists, both of which I find extremely
useful), and they're not going away anytime soon.

Haskell has its advantages over Common Lisp, of course, but it's
certainly not a "better Lisp", and its syntax is not "better S-
expressions but without macros", as macros are part of the _point_ of
S-expressions.

Do you want to be able to express common idioms more concisely, or do
you want to have the power to create your own idioms in a straight-
forward way?  It's a trade-off.  I have yet to see a syntax that is
both as flexible as and more concise than that of Common Lisp.

Mata ne,
Matthias
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a49c33$0$1609$ed2619ec@ptn-nntp-reader02.plus.net>
Matthias Benkard wrote:
> Do you want to be able to express common idioms more concisely, or do
> you want to have the power to create your own idioms in a straight-
> forward way?  It's a trade-off.  I have yet to see a syntax that is
> both as flexible as and more concise than that of Common Lisp.

Have a look at OCaml.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <071pi.21006$j7.379412@news.indigo.ie>
Jon Harrop wrote:

> Have a look at OCaml.

Turns out that has got some seriously fugly syntax.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xejiyli6k.fsf@ruckus.brouhaha.com>
Matthias Benkard <··········@gmail.com> writes:
> First of all, INCF is a macro.  How do you curry a macro?  That
> doesn't make much sense to me.

incf is a macro because macros are the only way to make Lisp forms
that don't evaluate their args.  Haskell uses lazy evaluation and
therefore all kinds of things that Lisp uses macros for, are done in
Haskell as ordinary functions.  Of course incf mutates its argument,
which normally isn't done in Haskell.  So you'd only code something
like incf as a monad action.
> 
> Third, INCF takes a variable number of arguments.  How is the compiler
> supposed to know whether MAKE-ACCUMULATOR is of type Number a => a -> a
> or of type Number a => a?

Type inference.
From: =?UTF-8?B?QW5kcsOpIFRoaWVtZQ==?=
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f85fg2$fqo$1@registered.motzarella.org>
Paul Rubin schrieb:
> Matthias Benkard <··········@gmail.com> writes:
>> First of all, INCF is a macro.  How do you curry a macro?  That
>> doesn't make much sense to me.
> 
> incf is a macro because macros are the only way to make Lisp forms
> that don't evaluate their args.

This is half true. What happens in Haskell automatically is what you
have to do in ML and Lisp manually - embed code blocks in an (anonymous)
function object. In Lisp you can provide your own version of eval and
make code lazy by default.


> Haskell uses lazy evaluation and
> therefore all kinds of things that Lisp uses macros for, are done in
> Haskell as ordinary functions.

I think most trivial macros can be done with this lazy way in Haskell.
At the cost of runtime overhead.
One example are reader macros.
People sometimes complain about mathematical notation in Lisp.
I complain about the one in other programming languages. In Lisp one
would write [7x₆ + 9π³ - 6ˣ]
vs          7*x[6] + 9*pi*pi*pi - pow(6, x)

Of course you could do the same in Haskell:
readableMath"7x₆ + 9π³ - 6ˣ"

At runtime some parser would begin its work. In Lisp it gets translated
into efficient machine code at compile time. But if someone wants he/she
could do it the same way as an OCaml or Haskell or Erlang user would.
I am also not sure how the less than 16 LOC of
http://www.gigamonkeys.com/book/practical-building-a-unit-test-framework.html
would look in one of these languages.


> Of course incf mutates its argument,
> which normally isn't done in Haskell.  So you'd only code something
> like incf as a monad action.

Would you provide the one liner that is doing incf?


André
-- 
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f84scn$eng$1@online.de>
Matthias Benkard schrieb:
>> the syntactic Haskell equivalent of the above
>> example would look like this:
>>    make-accumulator N = incf N
> 
> Not really.
> 
> First of all, INCF is a macro.

That's why I wrote "syntactic equivalent".
I was only talking about the overhead of having parentheses.

 > How do you curry a macro?  That doesn't make much sense to me.

I don't see any problems applying currying to macros that wouldn't apply 
to functions, or vice versa.
You can explain currying as a purely syntactic device. You can "explain 
it away" for functions by resorting to HOFs (and that's a useful 
perspective for some questions around currying), but you don't have to.

> Second, INCF takes a place as its first argument, not a value.

Seems like a macro thing to me.

> Third, INCF takes a variable number of arguments.  How is the compiler
> supposed to know whether MAKE-ACCUMULATOR is of type Number a => a -> a
> or of type Number a => a?

There's no difference. In Haskell, make-accumulator would be exactly 
equivalent to incf.

> So yes, claiming that the above pieces of code are syntactically
> equivalent _is_ cheating (macros are part of the syntax, after all).
> You may argue about the utility of macros, but that's beside the
> point, for the fact is, Common Lisp _does_ have macros (and places,
> and variable number argument lists, both of which I find extremely
> useful), and they're not going away anytime soon.

There is no difference between a macro and a function in Haskell.

In Haskell, there is no semantic difference between compile-time and 
run-time evaluation, so any macro would be a function and vice versa. 
(That's a general property of pure languages, and not due to Haskell's 
nonstrict evaluation strategy.)
You *can* have macros in Haskell (just plop in a preprocessor), but they 
aren't nearly as pressingly needed as in an impure language. (That may 
be the reason why preprocessors are more en vogue for OCaml than for 
Haskell.)

> I have yet to see a syntax that is
> both as flexible as and more concise than that of Common Lisp.

Drop the superfluous parentheses, for example. A minimum amount of 
operator precedence and layout rules eliminates 99% of them.
That's "just lexical conciseness", you'll say, and you'd be correct. 
However, when I look at the sheer percentage of screen estate these 
parentheses are taking up, it's getting too much.

Regards,
Jo
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <fYmpi.21023$j7.379519@news.indigo.ie>
Joachim Durchholz wrote:

> Drop the superfluous parentheses, for example. A minimum amount of 
> operator precedence and layout rules eliminates 99% of them.

And makes the code massively more annoying to read. In fact,
the baroque syntax is the main reason I dislike Haskell (Liskell borders
on interesting though, there might be a reasonable language buried
underneath the syntax afflicting Haskell).  I find Lisp,  Forth and APL 
pleasant to read largely due to their simple syntax without complicated
precedence. You may be  different, but not everyone's preferences are
the same.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a61492$0$1610$ed2619ec@ptn-nntp-reader02.plus.net>
David Golden wrote:
> And makes the code massively more annoying to read. In fact,
> the baroque syntax is the main reason I dislike Haskell (Liskell borders
> on interesting though, there might be a reasonable language buried
> underneath the syntax afflicting Haskell).  I find Lisp,  Forth and APL
> pleasant to read largely due to their simple syntax without complicated
> precedence. You may be  different, but not everyone's preferences are
> the same.

Right. This is a really a reflection of modern statically-typed FPLs being
languages for smart people. If you're still struggling with the precedence
of + and * then you've little hope of mastering any non-trivial type
system.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a62862$0$1627$ed2619ec@ptn-nntp-reader02.plus.net>
David Golden wrote:
> I don't "struggle".  I just regard it as annoying.

If you find 1+2 "annoying" then I doubt even Lisp can help.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: =?UTF-8?B?QW5kcsOpIFRoaWVtZQ==?=
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f85ftq$hfg$1@registered.motzarella.org>
Jon Harrop schrieb:
> David Golden wrote:
>> I don't "struggle".  I just regard it as annoying.
> 
> If you find 1+2 "annoying" then I doubt even Lisp can help.

I find it annoying that in OCaml I have to say
(Int64.to_float (Int64.sub (Int64.mul q (Int64.of_int n)) (Int64.mul s 
s))) /. (float n)

instead of simply

[(qn - s²) / n]

and can't something like
(when [√2 ≈ 1,41]  (print "Hello"))


André
-- 
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a71283$0$1596$ed2619ec@ptn-nntp-reader02.plus.net>
André Thieme wrote:
> Jon Harrop schrieb:
>> David Golden wrote:
>>> I don't "struggle".  I just regard it as annoying.
>> 
>> If you find 1+2 "annoying" then I doubt even Lisp can help.
> 
> I find it annoying that in OCaml I have to say
> (Int64.to_float (Int64.sub (Int64.mul q (Int64.of_int n)) (Int64.mul s
> s))) /. (float n)

You can remove some parentheses and open the Int64 module to simplify it a
bit:

# open Int64;;
# let f q n s = to_float(sub (mul q (of_int n)) (mul s s)) /. float n;;
val f : int64 -> int -> int64 -> float = <fun>

Also, you can define or override infix operators locally or create new
operators:

# let f q n s =
    let ( - ) = sub and ( * ) = mul in
    to_float(q * of_int n - s * s) /. float n;;
val f : int64 -> int -> int64 -> float = <fun>

> instead of simply
> 
> [(qn - s²) / n)]

You can certainly write a macro that implements that syntax but the
resulting code would not be efficient as there is no static type
information.

In this particular case, are you sure you can't just use floats throughout?

> and can't something like
> (when [√2 ≈ 1,41]  (print "Hello"))

You can define a =~ operator:

# let ( =~ ) x y = abs_float(x -. y) < sqrt epsilon_float;;
val ( =~ ) : float -> float -> bool = <fun>

and use it in the guard of a pattern match:

# match sqrt 2., 1.414213562 with
  | x, y when x =~ y -> "Similar"
  | _ -> "Dissimilar";;
- : string = "Similar"

However, approximate equality on floats is discouraged in any language. I
would recommend interval arithmetic instead.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Pascal Bourguignon
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87d4yhdio5.fsf@voyager.informatimago.com>
Joachim Durchholz <··@durchholz.org> writes:
>> I have yet to see a syntax that is
>> both as flexible as and more concise than that of Common Lisp.
>
> Drop the superfluous parentheses, for example. A minimum amount of
> operator precedence and layout rules eliminates 99% of them.

Oops!  Now you'll have to teach this minimum amount of operator
precedence and layout rules to the billions of macros out there.

And each macro will become much bigger and more complex...

-- 
__Pascal Bourguignon__                     http://www.informatimago.com/
Wanna go outside.
Oh, no! Help! I got outside!
Let me back inside!
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a613fa$0$1610$ed2619ec@ptn-nntp-reader02.plus.net>
Pascal Bourguignon wrote:
> Joachim Durchholz <··@durchholz.org> writes:
>>> I have yet to see a syntax that is
>>> both as flexible as and more concise than that of Common Lisp.
>>
>> Drop the superfluous parentheses, for example. A minimum amount of
>> operator precedence and layout rules eliminates 99% of them.
> 
> Oops!  Now you'll have to teach this minimum amount of operator
> precedence and layout rules to the billions of macros out there.
> 
> And each macro will become much bigger and more complex...

OCaml and Haskell already handle this just fine.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f85a59$vv8$1@online.de>
Pascal Bourguignon schrieb:
> Joachim Durchholz <··@durchholz.org> writes:
>>> I have yet to see a syntax that is
>>> both as flexible as and more concise than that of Common Lisp.
>> Drop the superfluous parentheses, for example. A minimum amount of
>> operator precedence and layout rules eliminates 99% of them.
> 
> Oops!  Now you'll have to teach this minimum amount of operator
> precedence and layout rules to the billions of macros out there.

Nonsense. Stick with a few hard-and-fast rules, and you can get rid of 
most parentheses.
The usual arithmetic/comparison/boolean/definitional hierarchy will do 
fine, with the additional twist that juxtaposition is an operator that 
binds most tightly (so sin x + 5 is (sin x) + 5, just as mathematical 
tradition would have it).

User-defined operators shouldn't get additional precedences. Unless 
you're doing an embedded language and have collected a *lot* of empirical 
data about operator usage in it, and are quite confident that you got 
the precedences right. (Even Pascal got the relative precedence of 
boolean and comparison operators wrong. And Niklaus Wirth certainly has 
spent a great deal of care on that one.)

Oh, and actually precedence shouldn't be attached to the function 
definitions but to the operator symbols. We're talking initial parsing 
here (whether by human or machine), that's the step that happens before 
semantics of any kind comes into play.
Even if you redefine +, it should keep its precedence level after all...

Regards,
Jo
From: Pascal Bourguignon
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87sl7daek7.fsf@voyager.informatimago.com>
Joachim Durchholz <··@durchholz.org> writes:

> Pascal Bourguignon schrieb:
>> Joachim Durchholz <··@durchholz.org> writes:
>>>> I have yet to see a syntax that is
>>>> both as flexible as and more concise than that of Common Lisp.
>>> Drop the superfluous parentheses, for example. A minimum amount of
>>> operator precedence and layout rules eliminates 99% of them.
>>
>> Oops!  Now you'll have to teach this minimum amount of operator
>> precedence and layout rules to the billions of macros out there.
>
> Nonsense. Stick with a few hard-and-fast rules, and you can get rid of
> most parentheses.

Like in:

  #define a(x,y) x*y

perhaps?

To be able to use this macro as:

  (int)a(1.0+2.0,3.0+4.0)

you need to actually define it as:

  #define a(x,y) ((x)*(y))

so much for operator precedence and layout rules.

In lisp, you merely write it as:

  (defmacro a (x y) `(* ,x ,y))

less parentheses than in C!

 
-- 
__Pascal Bourguignon__                     http://www.informatimago.com/

THIS IS A 100% MATTER PRODUCT: In the unlikely event that this
merchandise should contact antimatter in any form, a catastrophic
explosion will result.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f87b3f$6g6$1@online.de>
Pascal Bourguignon schrieb:
> Joachim Durchholz <··@durchholz.org> writes:
> 
>> Pascal Bourguignon schrieb:
>>> Joachim Durchholz <··@durchholz.org> writes:
>>>>> I have yet to see a syntax that is
>>>>> both as flexible as and more concise than that of Common Lisp.
>>>> Drop the superfluous parentheses, for example. A minimum amount of
>>>> operator precedence and layout rules eliminates 99% of them.
>>> Oops!  Now you'll have to teach this minimum amount of operator
>>> precedence and layout rules to the billions of macros out there.
>> Nonsense. Stick with a few hard-and-fast rules, and you can get rid of
>> most parentheses.
> 
> Like in:
> 
>   #define a(x,y) x*y
> 
> perhaps?

If you really thought I were advocating C macros, you should seriously 
consider getting professional help.

> To be able to use this macro as:
> 
>   (int)a(1.0+2.0,3.0+4.0)
> 
> you need to actually define it as:
> 
>   #define a(x,y) ((x)*(y))
> 
> so much for operator precedence and layout rules.

Strawman argument.

First, I've been talking about precedences in functions, not in macros. 
(The only language that I know where this kind of problem arises even in 
functions is Tcl. Guess which language I'll refuse to program in.)

Second, the C preprocessor does substitution at the character level 
(sometimes at the token level if it's more recent than K&R C). That's a 
far cry from modern preprocessing which does the substitutions at the 
AST level, where this kind of problem doesn't arise in the first place.

> In lisp, you merely write it as:
> 
>   (defmacro a (x y) `(* ,x ,y))
> 
> less parentheses than in C!

Suggesting pro-C advocacy is considered a serious offense here in 
comp.lang.functional, you know ;-)

Regards,
jo
From: Pascal Bourguignon
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <877iooadtv.fsf@voyager.informatimago.com>
Joachim Durchholz <··@durchholz.org> writes:

> Pascal Bourguignon schrieb:
>> Joachim Durchholz <··@durchholz.org> writes:
>>
>>> Pascal Bourguignon schrieb:
>>>> Joachim Durchholz <··@durchholz.org> writes:
>>>>>> I have yet to see a syntax that is
>>>>>> both as flexible as and more concise than that of Common Lisp.
>>>>> Drop the superfluous parentheses, for example. A minimum amount of
>>>>> operator precedence and layout rules eliminates 99% of them.
>>>> Oops!  Now you'll have to teach this minimum amount of operator
>>>> precedence and layout rules to the billions of macros out there.
>>> Nonsense. Stick with a few hard-and-fast rules, and you can get rid of
>>> most parentheses.
>>
>> Like in:
>>
>>   #define a(x,y) x*y
>>
>> perhaps?
>
> If you really thought I were advocating C macros, you should seriously
> consider getting professional help.

Neither did I think that, nor did I advocate anything.  I only took
C as an example of a language with "A minimum amount of operator
precedence and layout rules to eliminate 99% of them [parentheses]."


>> To be able to use this macro as:
>>
>>   (int)a(1.0+2.0,3.0+4.0)
>>
>> you need to actually define it as:
>>
>>   #define a(x,y) ((x)*(y))
>>
>> so much for operator precedence and layout rules.
>
> Strawman argument.
>
> First, I've been talking about precedences in functions, not in
> macros. (The only language that I know where this kind of problem
> arises even in functions is Tcl. Guess which language I'll refuse to
> program in.)

This is irrelevant.


> Second, the C preprocessor does substitution at the character level
> (sometimes at the token level if it's more recent than K&R C). 

This is irrelevant.


> That's a far cry from modern preprocessing which does the
> substitutions at the AST level, where this kind of problem doesn't
> arise in the first place.

That's my point!  There is NO parenthesis in lisp!
To wit:

* (dump-thing '(defun dump-thing (thing &optional (level 0))
                (if (atom thing)
                    (format t "~VA An atom: ~S~%" level "" thing)
                    (progn
                      (format t "~VA A cons cell:~%" level "")
                      (dump-thing (car thing) (1+ level))
                      (dump-thing (cdr thing) (1+ level))))))
 A cons cell:
  An atom: DEFUN
  A cons cell:
   An atom: DUMP-THING
   A cons cell:
    A cons cell:
     An atom: THING
     A cons cell:
      An atom: &OPTIONAL
      A cons cell:
       A cons cell:
        An atom: LEVEL
        A cons cell:
         An atom: 0
         An atom: NIL
       An atom: NIL
    A cons cell:
     A cons cell:
      An atom: IF
      A cons cell:
       A cons cell:
        An atom: ATOM
        A cons cell:
         An atom: THING
         An atom: NIL
       A cons cell:
        A cons cell:
         An atom: FORMAT
         A cons cell:
          An atom: T
          A cons cell:
           An atom: "~VA An atom: ~S~%"
           A cons cell:
            An atom: LEVEL
            A cons cell:
             An atom: ""
             A cons cell:
              An atom: THING
              An atom: NIL
        A cons cell:
         A cons cell:
          An atom: PROGN
          A cons cell:
           A cons cell:
            An atom: FORMAT
            A cons cell:
             An atom: T
             A cons cell:
              An atom: "~VA A cons cell:~%"
              A cons cell:
               An atom: LEVEL
               A cons cell:
                An atom: ""
                An atom: NIL
           A cons cell:
            A cons cell:
             An atom: DUMP-THING
             A cons cell:
              A cons cell:
               An atom: CAR
               A cons cell:
                An atom: THING
                An atom: NIL
              A cons cell:
               A cons cell:
                An atom: 1+
                A cons cell:
                 An atom: LEVEL
                 An atom: NIL
               An atom: NIL
            A cons cell:
             A cons cell:
              An atom: DUMP-THING
              A cons cell:
               A cons cell:
                An atom: CDR
                A cons cell:
                 An atom: THING
                 An atom: NIL
               A cons cell:
                A cons cell:
                 An atom: 1+
                 A cons cell:
                  An atom: LEVEL
                  An atom: NIL
                An atom: NIL
             An atom: NIL
         An atom: NIL
     An atom: NIL
NIL
* 


See?  Absolutely NO parenthesis can be dumped.       


>> In lisp, you merely write it as:
>>
>>   (defmacro a (x y) `(* ,x ,y))
>>
>> less parentheses than in C!
>
> Suggesting pro-C advocacy is considered a serious offense here in
> comp.lang.functional, you know ;-)

This was anti C, as an example of anti-what you get when you add "A
minimum amount of operator precedence and layout rules [to] eliminate
99% of them [parentheses]."



Perhaps you should try to make explicit your "few hard-and-fast rules"...

-- 
__Pascal Bourguignon__                     http://www.informatimago.com/

This universe shipped by weight, not volume.  Some expansion may have
occurred during shipment.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f87j1m$fj7$1@online.de>
Bah. Answering is pointless if the other person just redefines the topic 
to make a point.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a79433$0$1604$ed2619ec@ptn-nntp-reader02.plus.net>
Pascal Bourguignon wrote:
> There is NO parenthesis in lisp!
> 
> * (dump-thing '(defun dump-thing (thing &optional (level 0))
>                 (if (atom thing)
>                     (format t "~VA An atom: ~S~%" level "" thing)
>                     (progn
>                       (format t "~VA A cons cell:~%" level "")
>                       (dump-thing (car thing) (1+ level))
>                       (dump-thing (cdr thing) (1+ level))))))

I think that example was suboptimal... :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Christopher Browne
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <603azdxzwv.fsf@dba2.int.libertyrms.com>
Joachim Durchholz <··@durchholz.org> writes:
> Pascal Bourguignon schrieb:
>> Joachim Durchholz <··@durchholz.org> writes:
>>>> I have yet to see a syntax that is
>>>> both as flexible as and more concise than that of Common Lisp.
>>> Drop the superfluous parentheses, for example. A minimum amount of
>>> operator precedence and layout rules eliminates 99% of them.
>> Oops!  Now you'll have to teach this minimum amount of operator
>> precedence and layout rules to the billions of macros out there.
>
> Nonsense. Stick with a few hard-and-fast rules, and you can get rid of
> most parentheses.

The best commentary that I ever saw on this is the following:

"Parentheses?  What parentheses? I haven't noticed any parentheses
since my first month of Lisp programming.  I like to ask people who
complain about parentheses in Lisp if they are bothered by all the
spaces between words in a newspaper..."  -- Kenny Tilton <····@liii.com>
-- 
(reverse (concatenate 'string "ofni.sesabatadxunil" ·@" "enworbbc"))
http://cbbrowne.com/info/languages.html
"They laughed at Columbus, they laughed at Fulton, they laughed at the
Wright brothers.  But they also laughed at Bozo the Clown."
-- Carl Sagan
From: Kent M Pitman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ulkd549ag.fsf@nhplace.com>
[ comp.lang.lisp only
  http://www.nhplace.com/kent/PFAQ/cross-posting.html ]

Christopher Browne <········@ca.afilias.info> writes:

> > Nonsense. Stick with a few hard-and-fast rules, and you can get rid of
> > most parentheses.
> 
> The best commentary that I ever saw on this is the following:
> 
> "Parentheses?  What parentheses? I haven't noticed any parentheses
> since my first month of Lisp programming.  I like to ask people who
> complain about parentheses in Lisp if they are bothered by all the
> spaces between words in a newspaper..."  -- Kenny Tilton <····@liii.com>

Actually, although I frequently enjoy his irreverent wit, I think he's
slightly off the mark on this one, since in fact all those spaces are
actually shared as token separators between CL and English text.
Perhaps periods and commas would be a closer analogy.

I started programming in FORTRAN and BASIC before I arrived at MIT, so
I could have stuck to my early upbringing and insisted on infix.  I
learned Lisp in a rapid-fire sequence of courses from Weizenbaum,
Winston, and Sussman at MIT and one of several things that attracted
me to it was its LACK of syntax.  Doing something like natural language
processing and being able to entirely leap over issues of programming 
syntax and straight to operating on end-user-intelligible data such as:

 (THIS IS SOME PROGRAM DATA THAT YOU CAN WRITE INTERESTING PROGRAMS ABOUT)

was very empowering.  Other languages generally do not allow this.
If Eliza had first been written with notations like ["THIS","IS","SOME",...]
or #{#"THIS",#"IS",...}, I very much doubt I would have found it as
compelling.

To see this notational simplicity maligned as "too complicated" leaves
me wondering whether to laugh or cry.  What is often summed up as "all
those parens" is often some distorted code for "the LACK of more
complicated syntax".  And so I just don't get it.

... except in the case of infix math.  And it's not very hard to write
an escape syntax to get in and out of that if you need it.  It's
surely quite hard to write escape syntax to get in and out of Lisp
notation in other languages.
From: Tamas Papp
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87d4yh7cmq.fsf@pu100877.student.princeton.edu>
Joachim Durchholz <··@durchholz.org> writes:

> Nonsense. Stick with a few hard-and-fast rules, and you can get rid of
> most parentheses.

I wish that a blessed month could go by without somebody proposing
this.

Tamas
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185367782.688994.75370@19g2000hsx.googlegroups.com>
On Jul 24, 10:47 pm, Tamas Papp <······@gmail.com> wrote:
> Joachim Durchholz <····@durchholz.org> writes:
> > Nonsense. Stick with a few hard-and-fast rules, and you can get rid of
> > most parentheses.
>
> I wish that a blessed month could go by without somebody proposing
> this.
>
> Tamas

That's definitely bad, as it would mean there's nobody who's
starting to learn  lisp. Some of them will whine and leave, the rest
will stay.

Why do you prefer lisp over Ocaml/F# ?
I would rather be called a savage than a spammer.
From: Tamas Papp
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <878x9478ik.fsf@pu100877.student.princeton.edu>
Slobodan Blazeski <·················@gmail.com> writes:

> On Jul 24, 10:47 pm, Tamas Papp <······@gmail.com> wrote:
>> Joachim Durchholz <····@durchholz.org> writes:
>> > Nonsense. Stick with a few hard-and-fast rules, and you can get rid of
>> > most parentheses.
>>
>> I wish that a blessed month could go by without somebody proposing
>> this.
>>
>> Tamas
>
> That's a definitely bad, as it would mean there's nobody who's
> starting to learn  lisp. Some of them will whine and leave, the rest
> will stay.

I don't think that people who seriously want to learn Lisp go through
the whine-about-parenthesis stage.  Anyone with at least a nontrivial
interest in Lisp will read the first few chapters of a Lisp book, and
by that time, they will see the light.

My impression is that people who whine about parens have heard about
how good Lisp is, didn't get it and started to complain about
something trivial, or have been forced to learn some very basic Lisp
in a bad college course and resent that.

> Why do you prefer lisp over Ocaml/F# ?

Fortunately, my acquaintance with Ocaml/F# is cursory, I have looked
at Ocaml once and decided that it is not for me.  I tried Haskell last
summer, but for the stuff I am doing (numerical analysis) lazy
evaluation is not practical.  Lisp was almost perfect from the first
moment, even before I learned about macros.

> I would rather been called a savage than a spammer.

?

Tamas
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185458199.707522.126450@g4g2000hsf.googlegroups.com>
On Jul 25, 6:28 pm, Tamas Papp <······@gmail.com> wrote:
> Slobodan Blazeski <·················@gmail.com> writes:
> > On Jul 24, 10:47 pm, Tamas Papp <······@gmail.com> wrote:
> >> Joachim Durchholz <····@durchholz.org> writes:
> >> > Nonsense. Stick with a few hard-and-fast rules, and you can get rid of
> >> > most parentheses.
>
> >> I wish that a blessed month could go by without somebody proposing
> >> this.
>
> >> Tamas
>
> > That's a definitely bad, as it would mean there's nobody who's
> > starting to learn  lisp. Some of them will whine and leave, the rest
> > will stay.
>
> I don't think that people who seriously want to learn Lisp go through
> the whine-about-parenthesis stage.  Anyone with at least a nontrivial
> interest in Lisp will read the first few chapters of a Lisp book, and
> by that time, they will see the light.
>
> My impression is that people who whine about parens have heard about
> how good Lisp is, didn't get it and started to complain about
> something trivial, or have been forced to learn some very basic Lisp
> in a bad college course and resent that.

I disliked lisp syntax , too many parens, at first. And what will be
this group without me and especially without my spelling errors?
>
> > Why do you prefer lisp over Ocaml/F# ?
>
> Fortunately, my acquaintance with Ocaml/F# is cursory, I have looked
> at Ocaml once and decided that it is not for me.  I tried Haskell last
> summer, but for the stuff I am doing (numerical analysis) lazy
> evaluation is not practical.  Lisp was almost perfect from the first
> moment, even before I learned about macros.
>
> > I would rather been called a savage than a spammer.
>
> ?

Google for savages of cll or savages of lisp and you'll find a lot of
*nice discussions*. It reminds me of trolls non-stop posting that
FreeBSD is dying in the forums, so that most of the search hits for
FreeBSD led to those troll remarks. My favourite part is:

90% of Lispers give the other 10% a bad name, but generalizing in this
case
is like racism.
http://mail.python.org/pipermail/python-list/2003-October/230910.html

As per spammer word, lately every mention of OCaml / F# makes me feel
spammed . I wonder why?


"We have to improve our image in the internet community."
"Lets do a mass unsolicited email campaign to tell everyone how nice
we are."
<later> "You have the look of a man who was just put in charge of
implementing his own sarcastic suggestion."
        - The Pointy Haired Boss, Dilbert & Dogbert
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a70c57$0$1589$ed2619ec@ptn-nntp-reader02.plus.net>
Tamas Papp wrote:
> Joachim Durchholz <··@durchholz.org> writes:
>> Nonsense. Stick with a few hard-and-fast rules, and you can get rid of
>> most parentheses.
> 
> I wish that a blessed month could go by without somebody proposing
> this.

Amen to that. You might as well go back to caveman and propose fire...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Matthias Benkard
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185368046.601227.203750@22g2000hsm.googlegroups.com>
Hi,


> > First of all, INCF is a macro.
>
> That's why I wrote "syntactic equivalent".
> I was all talking about the overhead of having parentheses.

Well, what I was trying to say was that the given pieces of code were
_not_ syntactically equivalent, because in Lisp, INCF is a part of the
syntax, whereas in Haskell, it is not.


>  > How do you curry a macro?  That doesn't make much sense to me.
>
> I don't see any problems applying currying to macros that wouldn't apply
> to functions, or vice versa.

Hmm.  Maybe you're right about that.  I will have to think about this
a bit.


> > Second, INCF takes a place as its first argument, not a value.
>
> Seems like a macro thing to me.

Yes, that's the point.  In other languages, you would need an
additional syntactic device to distinguish places from values.  As
INCF defines its own syntax, that is not necessary in Lisp.


> There is no difference between a macro and a function in Haskell.
>
> In Haskell, there is no semantic difference between compile-time and
> run-time evaluation, so any macro would be a function and vice versa.

Macros are different from functions not only in their time of
evaluation, but also in their semantics.  Their purpose is not
precomputing stuff at compile-time, but the definition of new syntax
and transformation of code.

Doing evaluation prior to run-time can be done without macros.  That's
what compiler macros, EVAL-WHEN and LOAD-TIME-VALUE are for (among
others).  (Yeah, the terminology is confusing.  Macros, reader macros
and compiler macros are all different things.)  Also note that some
Lisp interpreters (rather than compilers) do macroexpansion at run-
time, so the reverse is true as well.


> You *can* have macros in Haskell (just plop in a preprocessor), but they
> aren't nearly as pressingly needed as in an impure language. (That may
> be the reason why preprocessors are more en vogue for OCaml than for
> Haskell.)

That may be one reason.  Another reason may be the higher complexity
and intrusiveness of macros in a language with complex syntax.  We can
only guess here.


> Drop the superfluous parentheses, for example. A minimum amount of
> operator precedence and layout rules eliminates 99% of them.
> That's "just lexical conciseness", you'll say, and you'd be correct.
> However, when I look at the sheer percentage of screen estate these
> parentheses are taking up, it's getting too much.

Oh yes, lexical conciseness is a form of conciseness all right.  I'd
love to see the syntax you're thinking of.

Mata ne,
Matthias
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f87kdh$h9v$1@online.de>
Matthias Benkard schrieb:
> Hi,
> 
> 
>>> First of all, INCF is a macro.
>> That's why I wrote "syntactic equivalent".
>> I was all talking about the overhead of having parentheses.
> 
> Well, what I was trying to say was that the given pieces of code were
> _not_ syntactically equivalent, because in Lisp, INCF is a part of the
> syntax, whereas in Haskell, it is not.

Heck, this entire subthread is about syntax and parentheses.
I don't get why people insist on changing the subject to pick a nit.

I'm well aware that macros are a different concept than functions.

But that's not what I'm talking about; all I want to say is that Lisp's 
insistence on not having operator precedence comes at the price of 
having to write lots of parentheses when entering code. And that these 
parentheses, tiny as they are in isolation, take up a good deal of 
screen space because there are so many of them.

If I were talking about semantic differences, I'd have said that the 
difference between macros and functions vanishes in pure languages, so 
you can leave it to the compiler to decide what to consider a macro and 
what to consider a function.
I'd also have pointed out that the various quoting mechanisms are 
superfluous in Haskell; the subtle semantic differences between quoting 
mechanisms is what keeps me from using Lisp. (And possibly others, too; 
learning the differences is hard, and applying the right quoting 
mechanism and the right level of quoting can be difficult unless you 
know exactly what you're doing - there's a rather steep learning curve 
in that particular area of Lisp, an area that doesn't even exist in most 
other languages.)

>>> Second, INCF takes a place as its first argument, not a value.
>> Seems like a macro thing to me.
> 
> Yes, that's the point.  In other languages, you would need an
> additional syntactic device to distinguish places from values.  As
> INCF defines its own syntax, that is not necessary in Lisp.

What, then, is a place?

>> There is no difference between a macro and a function in Haskell.
>>
>> In Haskell, there is no semantic difference between compile-time and
>> run-time evaluation, so any macro would be a function and vice versa.
> 
> Macros are different from functions not only in their time of
> evaluation, but also in their semantics.  Their purpose is not
> precomputing stuff at compile-time, but the definition of new syntax
> and transformation of code.

Code transformation is done by the compiler in Haskell.
Abstraction (i.e. new languages) is daily staple in Haskell. Sure, 
Haskell embedded languages cannot do fancy syntax, but if you're fine 
with frugal syntax, you define your embedded language simply as a set of 
HOFs to use.
All without macros...

>> You *can* have macros in Haskell (just plop in a preprocessor), but they
>> aren't nearly as pressingly needed as in an impure language. (That may
>> be the reason why preprocessors are more en vogue for OCaml than for
>> Haskell.)
> 
> That may be one reason.  Another reason may be the higher complexity
> and intrusiveness of macros in a language with complex syntax.

Seems reasonable.

>> Drop the superfluous parentheses, for example. A minimum amount of
>> operator precedence and layout rules eliminates 99% of them.
>> That's "just lexical conciseness", you'll say, and you'd be correct.
>> However, when I look at the sheer percentage of screen estate these
>> parentheses are taking up, it's getting too much.
> 
> Oh yes, lexical conciseness is a form of conciseness all right.  I'd
> love to see the syntax you're thinking of.

Hmm... I think I already outlined it elsewhere.

Anyway, here goes:

First, a predefined set of operator precedences. Precedence levels would 
be the usual suspects:

Binding =
Boolean Additive |
Boolean Multiplicative &
Comparison < = > <= >= !=
Arithmetic Additive + -
Arithmetic Multiplicative * /

Throw in an "indentation is another way to write parentheses" rule.

Stick with parentheses otherwise.

Creating new precedence levels should be possible, but be a rare exception.

This is just a rough sketch.
The point of the indentation rule is that you can freely trade 
horizontal and vertical space. And you don't need closing parentheses 
anymore (I think humans concentrate on the beginning of a parenthesized 
expression, the ends are curiously uninteresting - and indentation rules 
are asymmetric, inverting the lines of a structurally indented program 
is not the same as inverting the order of symbols in parenthesized 
expressions).

Regards,
Jo
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <GMLpi.21038$j7.379492@news.indigo.ie>
Joachim Durchholz wrote:

> What, then, is a place?
> 
http://www.lisp.org/HyperSpec/Body/sec_5-1-1.html

By example - "increment the second element of the array
that is in b in an instance of structure s that is bound to a":
(incf (aref (s-b a) 1))

Perhaps not so relevant in a non-mutable/non-side-effecting context: The
primary purpose of the above incf form is presumably to mutate, though
it will return a value too.  
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8cgh5$shn$1@online.de>
David Golden schrieb:
> Joachim Durchholz wrote:
> 
>> What, then, is a place?
>>
> http://www.lisp.org/HyperSpec/Body/sec_5-1-1.html
> 
> By example - "increment the second element of the array
> that is in b in an instance of structure s that is bound to a":
> (incf (aref (s-b a) 1))

Seems like it's essentially an lvalue, only you can provide an algorithm 
to pinpoint it instead of passing it around directly.
I.e. it's equivalent to a function returning an lvalue (with a very 
simple syntax).

> Perhaps not so relevant in a non-mutable/non-side-effecting context:

Well, in modern FPLs, anonymous functions are almost the same as 
expressions syntactically, so writing a function that returns an lvalue 
should be a snap in OCaml or SML.
Actually, the technique would be so straightforward that I wouldn't even 
name it.

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a9c7b0$0$1631$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> Well, in modern FPLs, anonymous functions are almost the same as
> expressions syntactically, so writing a function that returns an lvalue
> should be a snap in OCaml or SML.
> Actually, the technique would be so straightforward that I wouldn't even
> name it.

You can even see it in my Minim interpreter.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8bbu3$hv5$1@registered.motzarella.org>
Joachim Durchholz schrieb:

> I'm well aware that macros are a different concept than functions.
> 
> But that's not what I'm talking about; all I want to say is that Lisp's 
> insistence on not having operator precedence comes at the price of 
> having to write lots of parentheses when entering code. And that these 
> parentheses, tiny as they are in isolation, take up a good deal of 
> screen space because there are so many of them.

That Lisp has all these parens has mainly two reasons.
First: they organize code itself in trees, making it trivially 
transformable.
Translation into a syntax tree is not important anymore, because code
already is one. Having such easily accessible macros comes from representing
code inside one of the most flexible datastructures.
But even if we ignore that, there is another design decision:
having functions that can take any number of arguments.
To make that possible you need two tokens. The parens just look
better than most other stuff.

One might think: hey, why not making the first argument to a function
call always the number of arguments that will follow?
Then no parens are needed. Instead of (+ 1 2 3) we would say:
+ 3 1 2 3
But then the problem is that an argument can be a function call itself.
So we need another token for that which means: function application will
follow. Let's make it "$". So instead of (+ 1 (* 1 2) 3) we say:
+ 3 1 $ * 2 1 2 3
Again we have two tokens. The new argument count and $, instead of ( and ).
And if you ask me: I know which I prefer.

With the decision of allowing functions that take any number of
arguments Lisp has the nice feature that packing data into a list before
making it an argument is done by the compiler.
(+ 1 2 3) instead of (+ (list 1 2 3))


> What, then, is a place?

Do you know C? Think about something that is on the left side of a "=".
Left hand value.
x[5] = 100;
Then x[5] is the place. A place is a syntactical construct that looks
like accessing a value. x[5] will in nearly all cases get replaced by
the compiler with its value. But on the left side of a  =  it behaves
different. If x[5] == 17 then
x[5] = 100;
does not mean
17 = 100;


> Code transformation is done by the compiler in Haskell.
> Abstraction (i.e. new languages) is daily staple in Haskell. Sure, 
> Haskell embedded languages cannot do fancy syntax, but if you're fine 
> with frugal syntax, you define your embedded language simply as a set of 
> HOFs to use.

You can do the same in Lisp by making things lazy. In Lisp it would be
uglier than in Haskell because you would explicitly say
(delay (+ 1 2 3)) and (force xyz).
And yes, I think with lazy evaluation Haskell comes much closer to what
macros can do in a usable way than most other languages.


 > All without macros...

Lisp could also do it. But it does not have to rely on laziness.
In Lisp you get very nice syntactical abstractions.
All without laziness...



André
-- 
From: Kent M Pitman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <u4pjqisky.fsf@nhplace.com>
[ comp.lang.lisp only.
  http://www.nhplace.com/kent/PFAQ/cross-posting.html ]

···@zedat.fu-berlin.de (Stefan Ram) writes:

> André Thieme  <······························@justmail.de> writes:
> >That Lisp has all these parens has mainly two reasons.
> 
>   Historically, LISP started as a List-processing package 
>   for FORTRAN. The parentheses were in there, because this
>   was the list notation of that time.
> 
>   Then Steve Russel implemented EVAL - and the parentheses were
>   in the language because EVAL was evaluating those lists.
> 
>   So actually, the parentheses notation was there first, then
>   followed the interpretation of its expressions as programs.

MACLISP, up until the late 1970's, I think, allowed commas as
whitespace characters, allowing one to write (a,b,c) meaning (a b c).
It wasn't until backquote came along that we had to give up commas as
"superfluous", at least in the Maclisp family of languages.  I kicked
and screamed a bit about that, because I liked using commas as
whitespace.  But I don't think most people made much noise about
losing the commas.  [Though there was quite a competition for the
detailed syntax/semantics of the notation, since there were lots of
private variations.]
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a9370e$0$1630$ed2619ec@ptn-nntp-reader02.plus.net>
André Thieme wrote:
> Translation into a syntax tree is not important anymore, because code
> already is one.

Assuming, of course, that you never want to communicate with the outside
world, where everyone has ditched s-exprs in favour of a plethora of
domain-specific syntaxes: C, C++, Java, C#, regexps, dna, protein, html,
xml, svg, sql.

In practice, s-exprs are so rare outside Lisp/Scheme that a "syntax is not
important anymore" attitude gets you nowhere.

> Lisp could also do it.

Turing argument.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8bee0$ojh$1@registered.motzarella.org>
Jon Harrop schrieb:
> André Thieme wrote:
>> Translation into a syntax tree is not important anymore, because code
>> already is one.
> 
> Assuming, of course, that you never want to communicate with the outside
> world, where everyone has ditched s-exprs in favour of a plethora of
> domain-specific syntaxes: C, C++, Java, C#, regexps, dna, protein, html,
> xml, svg, sql.

html is in nearly all cases like s-expressions plus noise and xml is
in all cases like s-expressions plus noise.
I don't know if there exist more lines of code in the languages C, C++,
Java and C# or more xml...


> In practice, s-exprs are so rare outside Lisp/Scheme that a "syntax is not
> important anymore" attitude gets you nowhere.

Although Java and C# are so called modern languages with lots of syntax
they are usually much more verbose than Lisp code.
Which Syntax is better for what case? As soon your language comes with
syntax someone else (the language designer) decided what the users have
to use. Every year a new language comes out and suggests to use a new
syntax. Each has its specific advantages. Lisp is the language that
initially comes without syntax. This puts you into the position to have
them all.


>> Lisp could also do it.
> 
> Turing argument.

You are not right this time. It is easy in Lisp.
Explicit laziness can be done in a few LOC. Look into PAIP.


André
-- 
From: David Formosa (aka ? the Platypus)
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <slrnfb1r3t.219.dformosa@localhost.localdomain>
["Followup-To:" header set to comp.lang.list]
On Fri, 27 Jul 2007 02:31:57 +0200, André Thieme
<······························@justmail.de> wrote: 
> Jon Harrop schrieb:
[...]
>> Assuming, of course, that you never want to communicate with the outside
>> world, where everyone has ditched s-exprs in favour of a plethora of
>> domain-specific syntaxes: C, C++, Java, C#, regexps, dna, protein, html,
>> xml, svg, sql.
>
> html is in nearly all cases like s-expressions plus noise and xml is
> in all cases like s-expressions plus noise.

Almost everything that can be expressed as a tree can be thought as
s-expressions plus noise.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8chhh$top$1@online.de>
André Thieme schrieb:
> That Lisp has all these parens has mainly two reasons.
> First: they organize code itself in trees, making it trivially 
> transformable.
> Translation into a syntax tree is not important anymore, because code
> already is one. Having so easy accessible macros comes from representing
> code inside one of the most flexible datastructures.

This argument applies to any kind of regular syntax.
Lisp could thrive with predefined operator precedence plus 
indentation-is-parentheses (plus parentheses for those cases where you 
still need them), have 90% less parentheses, 30% more code per page, and 
*still* be simple to process macrologically.

> But even if we ignore that there is another design descision:
> having functions that can take any number of arguments.
> To make that possible you need two tokens.

You can make that possible without the need for additional tokens.
Simply use currying.

> One might think: hey, why not making the first argument to a function
> call always the number of arguments that will follow?
> Then no parens are needed. Instead of (+ 1 2 3) we would say:
> + 3 1 2 3

Not a good idea anyway. You'd end up chasing number-of-arguments bugs.

> With the descision of allowing functions that take any number of
> arguments Lisp has the nice feature that packing data into a list before
> making it an argument is done by the compiler.
> (+ 1 2 3) instead of (+ (list 1 2 3))

OK, currying cannot handle that case; the Haskell equivalent of the 
above would be
   plus [1 2 3]

However, such lists are needed only in a few places in a program. I 
don't think that this case warrants special attention to make it easy - 
not if it constrains the design space elsewhere.

>> Code transformation is done by the compiler in Haskell.
>> Abstraction (i.e. new languages) is daily staple in Haskell. Sure, 
>> Haskell embedded languages cannot do fancy syntax, but if you're fine 
>> with frugal syntax, you define your embedded language simply as a set 
>> of HOFs to use.
> 
> You can do the same in Lisp by making things lazy.

Ah, but laziness cannot be efficiently implemented in Lisp.
Nor would that be very desirable in a language with mutation.

 > In Lisp it would be
> uglier than in Haskell because you would explicitily say
> (delay (+ 1 2 3)) and (force xyz).

The main difference is that modern non-Lisp FPLs leave out the outermost 
pair of parentheses :-)
(... and the parentheses at the outermost level between operators)

> And yes, I think with lazy evaluation Haskell comes much closer to what
> macros can do in a usable way than most other languages.

Then we agree here.

>  > All without macros...
> 
> Lisp could also do it. But it does not have to rely on lazyness.

You don't need laziness to get rid of most parentheses.
It's completely orthogonal to semantics.

> In Lisp you get very nice syntactical abstractions.
> All without lazyness...

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a9efdf$0$1598$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> Lisp could thrive with predefined operator precedence...

I disagree. The problems with Lisp run far deeper than syntactic issues.
Having such an archaic syntax forced upon you is one of the main reasons
people never bother starting with Lisp (poor performance is the other). But
Lisp lacks so many features that people take for granted now. It just isn't
going anywhere.

> Ah, but laziness cannot be efficiently implemented in Lisp...

As Haskell has shown, laziness cannot be implemented efficiently at all.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <oWnqi.18520$dp3.645316@phobos.telenet-ops.be>
Jon Harrop wrote:
> Joachim Durchholz wrote:
>> Lisp could thrive with predefined operator precedence...
> 
> I disagree. The problems with Lisp run far deeper than syntactic issues.
> Having such an archaic syntax forced upon you is one of the main reasons
> people never bother starting with Lisp (poor performance is the other). But
> Lisp lacks so many features that people take forgranted now. It just isn't
> going anywhere.
> 

There you go again =)

>> Ah, but laziness cannot be efficiently implemented in Lisp...
> 
> As Haskell has shown, laziness cannot be implemented efficient at all.
> 

As ruby and python have shown, programmers are only motivated by efficiency.

Sacha
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46aa1cef$0$1600$ed2619ec@ptn-nntp-reader02.plus.net>
Sacha wrote:
>> As Haskell has shown, laziness cannot be implemented efficient at all.
> 
> As ruby and python have shown, programmers are only motivated by
> efficiency.

Theoretically good but practically bad performance is only suitable for
academics.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <6spqi.18638$8c5.901170@phobos.telenet-ops.be>
Jon Harrop wrote:
> Sacha wrote:
>>> As Haskell has shown, laziness cannot be implemented efficient at all.
>> As ruby and python have shown, programmers are only motivated by
>> efficiency.
> 
> Theoretically good but practically bad performance is only suitable for
> academics.
> 

I guess those guys at google are desperately theoretical.

Sacha
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46aa3ebb$0$1626$ed2619ec@ptn-nntp-reader02.plus.net>
Sacha wrote:
> I guess those guys at google are desperately theoretical.

Is someone at Google using a lazy language?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <4Zrqi.18850$hB1.1008831@phobos.telenet-ops.be>
Jon Harrop wrote:
> Sacha wrote:
>> I guess those guys at google are desperately theoretical.
> 
> Is someone at Google using a lazy language?
> 

I never said that.

 >>> As Haskell has shown, laziness cannot be implemented efficient at 
 >>> all.
 >>
 >> As ruby and python have shown, programmers are only motivated by
 >> efficiency.
 >
 >Theoretically good but practically bad performance is only suitable for
 >academics

I hear these guys at google use a fair bit of python. I might be 
mistaken, but my understanding was that this language is not very fast 
and would perform poorly in your ray-tracer benchmark. And yet, this 
rather successful company uses it. The services they provide are 
responsive enough in my opinion.

So it would seem raw language efficiency doesn't matter as much as you 
think when it's about real-life programming.

Abstraction is key, laziness helps in this regard and comes at a price.

Of course it does.

The whole functional programming style is about abstraction over 
efficiency.

Sacha
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46aa728c$0$1608$ed2619ec@ptn-nntp-reader02.plus.net>
Sacha wrote:
> I hear these guys at google use a fair bit of python. I might be
> mistaken, but my understanding was that this language is not very fast
> and would perform poorly in your ray-tracer benchmark. And yet, this
> rather successful company uses it. The services they provide are
> responsive enough in my opinion.

Absolutely.

> So it would seem raw language efficiency doesn't matter as much as you
> think when its about real life programming.

Depends what you're doing.

> Abstraction is key, lazyness helps in this regard and comes at a price.
> 
> Of course it does.
> 
> The whole functional programming style is about abstraction over
> efficiency.

Not in OCaml. :-)

My comment was trying to say that lazy languages still appear to be
considerably slower than eager languages in the context of a hypothetical
lazy Lisp being slow. I wasn't saying that performance is always important
which, as you say, it isn't.

However, functional programming doesn't require you to sacrifice run-time
performance (much) as, I think, OCaml and Stalin-compiled Scheme have
shown.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8ftuu$toa$1@registered.motzarella.org>
Jon Harrop schrieb:

> However, functional programming doesn't require you to sacrifice run-time
> performance (much) as, I think, OCaml and Stalin-compiled Scheme have
> shown.

In the long run it is only about productivity.
With compilers that nearly match human intelligence all programs would
result in the end in the same, highly optimized machine language output.
Those compilers are not available now, but I think many of us will see
them within their lifetime.
Until then Lisp is for most things the best bet. It performs fast enough,
for most tasks close to C or faster (simply because it would be too
complicated to write a complex program in C).
It is not fixed into a specific paradigm, like functional programming
or logic programming. It is the only truly multiparadigm programming
language and one can create domain specific languages easier than any
other language. This means of course unbeatable productivity, as no
general purpose language could compete with a domain specific one.


André
-- 
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <78odhu2svl.fsf@hod.lan.m-e-leypold.de>
> Jon Harrop schrieb:
>
>> However, functional programming doesn't require you to sacrifice run-time
>> performance (much) as, I think, OCaml and Stalin-compiled Scheme have
>> shown.
>
> In the long run it is only about productivity.
> With compilers that nearly match human intelligence all programs would
> result in the end in the same, highly optimized machine language output.
> Those compilers are not available now, but I think many of us will see
> them within their livetime.
> Until then Lisp is for most things the best bet. It performs fast enough,
> for most tasks close to C or faster (simply because it would be too
> complicated to write a complex program in C).
> It is not fixed into a specific paradigm, like functional programming
> or logic programming. It is the only truly multiparadigm programming
> language and one can create domain specific languages easier than any
> other language. This means of course unbeatable productivity, as no
> general purpose language could compete with a domain specific one.

Whereas you're without doubt partially right most of these points also
apply to -- tatatatAAA: Ocaml. For my taste your laudation is a bit
overcrowded with superlatives -- which damages credibility, and also
leaves us with the question why Lisp hasn't replaced all other
languages in the industry (considering it's unbeatable one would
expect the industry to flock to Lisp in troves, wouldn't you?).

Not that I don't say Lisp is bad: But a more balanced point of view
(perhaps also accounting for a certain lack of adoption in recent
times and for certain weaknesses) would in my
eyes further your case better.

Regards -- Markus
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8lcsl$g2s$1@registered.motzarella.org>
Markus E.L. schrieb:
>> Jon Harrop schrieb:
>>
>>> However, functional programming doesn't require you to sacrifice run-time
>>> performance (much) as, I think, OCaml and Stalin-compiled Scheme have
>>> shown.
>> In the long run it is only about productivity.
>> With compilers that nearly match human intelligence all programs would
>> result in the end in the same, highly optimized machine language output.
>> Those compilers are not available now, but I think many of us will see
>> them within their livetime.
>> Until then Lisp is for most things the best bet. It performs fast enough,
>> for most tasks close to C or faster (simply because it would be too
>> complicated to write a complex program in C).
>> It is not fixed into a specific paradigm, like functional programming
>> or logic programming. It is the only truly multiparadigm programming
>> language and one can create domain specific languages easier than any
>> other language. This means of course unbeatable productivity, as no
>> general purpose language could compete with a domain specific one.
> 
> Whereas you're without doubt partially right most of these points also
> apply to -- tatatatAAA: Ocaml.

I wanted to express that OCaml is not a multiparadigm language, while
Lisp is one. OCaml (and even more so Haskell) is specialized on
functional programming. Take for example the one namespace.
Lisp has as many you want. But for functional programming it means that
you need  function or #'  and also  funcall. That adds (admittedly tiny)
extra complexity in the Lisp code... those will add up when doing nearly
completely functional programming.
Also Lisp has no implicit currying. So you get another bit of complexity
as you have to say (mapcar (curry #'* 5) list). In Haskell it would be
something like     map (* 5) l.
Whenever you decide for a feature in a programming language you decide
against others.
By orienting the syntax strongly on functional programming it becomes
more cumbersome to express other things.
Lisp basically comes with nothing, which is the price it pays for being
programmable. So first you have to work yourself on it or use a lib to
specialize. I for example can write:  (mapcar [* 5] list). I also got
rid of the lambda and oriented more at Haskells way to create anon
functions.


> For my taste your laudation is a bit overcrowded with superlatives

I did not try to impress you or win you as my new friend.


> -- which damages credibility, and also
> leaves us with the wuestion why Lisp hasn't replaced all other
> languages in the industry (considering it's unbeatable one would
> expect the industry to flock to Lisp in troves, wouldn't you?).

I don't know. The masses seem to mostly ignore Lisp, Erlang, Haskell,
OCaml and Prolog. And that although our languages are in our eyes
better designed than the languages that are used by the masses.


> Not that I don't say Lisp is bad:

Freudian slip? ;-)


André
-- 
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <3xodht4qa5.fsf@hod.lan.m-e-leypold.de>
> Markus E.L. schrieb:

>> Not that I don't say Lisp is bad:
>
> Freudian slip? ;-)

Not quite. Should have been "Note that I don't say Lisp is bad" :-).

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46af1ad9$0$1600$ed2619ec@ptn-nntp-reader02.plus.net>
André Thieme wrote:
> I wanted to express that OCaml is not a multiparadigm language, while
> Lisp is one. OCaml (and even more so Haskell) is specialized on
> functional programming. Take for example the one namespace.
> Lisp has as many you want. But for functional programming it means that
> you need  function or #'  and also  funcall. That adds (admittedly tiny)
> extra complexity in the Lisp code... those will add up when doing nearly
> completely functional programming.
> Also Lisp has no implicit currying. So you get another bit of complexity
> as you have to say (mapcar (curry #'* 5) list). In Haskell it would be
> something like     map (* 5) l.
> Whenever you decide for a feature in a programming language you decide
> against others.
> By orienting the syntax strongly on functional programming it becomes
> more cumbersome to express other things.

You just gave examples of Lisp being cumbersome and concluded that OCaml and
Haskell are cumbersome. Can you give some examples where OCaml or Haskell
syntax is cumbersome?

> Lisp basically comes with nothing, which is the price it pays for being
> programmable.

How is Lisp any more programmable than, say, OCaml?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: =?utf-8?b?R2lzbGUgU8ODwqZsZW5zbWk=?= =?utf-8?b?bmRl?=
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <0nk5sgy7d7.fsf@kaktus.ii.uib.no>
Jon Harrop <···@ffconsultancy.com> writes:

> André Thieme wrote:
> 
> How is Lisp any more programmable than, say, OCaml?

One example is lisp macros that allow you to make domain-specific extensions.
I have personally extended my code with macros for lex- and yacc-like lexers/parsers
inside my code. To my knowledge, this could not be done as easily in OCaml.

Now this has not so much to do with dynamic typing of Lisp as it has with the
s-expression syntax. In fact Liskell (www.liskell.org) is an s-expression wrapper
to ordinary Haskell, that would make it possible to write similar kind of domain
specific code on a Haskell system. It is a quite new system, so I guess it is more
a proof of concept than a production system now. 

> 
> -- 
> Dr Jon D Harrop, Flying Frog Consultancy
> OCaml for Scientists
> http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet

-- 
Gisle Sælensminde, Phd student, Scientific programmer
Computational biology unit, BCCS, University of Bergen, Norway, 
Email: ·····@cbu.uib.no
The best way to travel is by means of imagination
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46afb89e$0$1628$ed2619ec@ptn-nntp-reader02.plus.net>
Gisle Sælensminde wrote:
> One example is lisp macros that allow you to make domain-specific
> extansions. I have personally extended my code with macros for lex and
> yacc like lexers/parsers inside my code. To my knowledgem this could not
> be done as easily in OCaml.

Check out OCaml's revamped camlp4 macros. They are really rather cool. You
can rewrite at the term level, as Lisp's macros do, but you can also create
and extend LL grammars on-the-fly.

> Now this has not so much to do with dynamic typing of Lisp as it has with
> the s-expression syntax. In fact Liskell (www.liskell.org) is an
> s-expression wrapper to ordinary Haskell, that would make it possible to
> write similar kind of domain specific code on a Haskell system. It is a
> quite new system, so I guess it is more a proof of concept than a
> production system now.

I am much less familiar with Haskell but I thought it also had a macro
system?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8ppdi$4lp$1@online.de>
Gisle Sælensminde schrieb:
> Jon Harrop <···@ffconsultancy.com> writes:
> 
>> André Thieme wrote:
>>
>> How is Lisp any more programmable than, say, OCaml?
> 
> One example is lisp macros that allow you to make domain-specific extansions.

A well-designed set of functions taking and constructing closures 
is a domain-specific language, too.
I have seen this technique applied in Haskell, and I see no reason why 
it wouldn't work in any language with closures and HOFs (including Lisp).

> I have personally extended my code with macros for lex and yacc like lexers/parsers
> inside my code. To my knowledgem this could not be done as easily in OCaml.

Use HOFs.
They will usually run during execution time, so you might have 
performance differences, but otherwise, the general flexibility should 
be the same.

> Now this has not so much to do with dynamic typing of Lisp as it has with the
> s-expression syntax.

I don't think that syntax plays a role for domain-specific sublanguages.

Sure, it's crucial for macros (you need a very regular syntax to make 
writing macros easy enough), but you don't need macros for DSLs.

Regards,
Jo
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185972130.276690.64720@d55g2000hsg.googlegroups.com>
On Aug 1, 1:05 pm, Joachim Durchholz <····@durchholz.org> wrote:
> Gisle Sælensminde schrieb:
>
> > Jon Harrop <····@ffconsultancy.com> writes:
>
> >> André Thieme wrote:
>
> >> How is Lisp any more programmable than, say, OCaml?
>
> > One example is lisp macros that allow you to make domain-specific extansions.
>
> A set of well-designed set of functions taking and constructing closures
> is a domain-specific language, too.
> I have seen this technique applied in Haskell, and I see no reason why
> it wouldn't work in any language with closures and HOFs (including Lisp).
>
> > I have personally extended my code with macros for lex and yacc like lexers/parsers
> > inside my code. To my knowledgem this could not be done as easily in OCaml.
>
> Use HOFs.
> They will usually run during execution time, so you might have
> performance differences, but otherwise, the general flexibility should
> be the same.
>
> > Now this has not so much to do with dynamic typing of Lisp as it has with the
> > s-expression syntax.
>
> I don't think that syntax plays a role for domain-specific sublanguages.
>
> Sure, it's crucial for macros (you need a very regular syntax to make
> writing macros easy enough), but you don't need macros for DSLs.

Macros are just a shorthand, everything you could do with them you
could do without them. So you *could* write your DSL without macros
like (calculate-approximate-military-budget 2007) but they could save
you from writing a lot of boilerplate code and help you catch a lot of
patterns. Macros  could make your code more concise,  they could
generate functions that elsewhere you *must* write by hand, becoming a
so-called human compiler. If there's a tool that could make your life
easier why not using it. Just look at Paul Graham's book On Lisp
freely available at http://www.paulgraham.com/onlisptext.html  . If
you don't find anything that could help your work becoming more easy
than maybe your domain and/or personality is not lisp-friendly. In my
case macros help me save mountains of typing even if I'm actually
avoiding using them, as you can't funcall/apply them and I'm far from an
experienced macro writer.
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ncodhr4cvz.fsf@hod.lan.m-e-leypold.de>
> On Aug 1, 1:05 pm, Joachim Durchholz <····@durchholz.org> wrote:
>> Gisle Sælensminde schrieb:
>>
>> > Jon Harrop <····@ffconsultancy.com> writes:
>>
>> >> André Thieme wrote:
>>
>> >> How is Lisp any more programmable than, say, OCaml?
>>
>> > One example is lisp macros that allow you to make domain-specific extansions.
>>
>> A set of well-designed set of functions taking and constructing closures
>> is a domain-specific language, too.
>> I have seen this technique applied in Haskell, and I see no reason why
>> it wouldn't work in any language with closures and HOFs (including Lisp).
>>
>> > I have personally extended my code with macros for lex and yacc like lexers/parsers
>> > inside my code. To my knowledgem this could not be done as easily in OCaml.
>>
>> Use HOFs.
>> They will usually run during execution time, so you might have
>> performance differences, but otherwise, the general flexibility should
>> be the same.
>>
>> > Now this has not so much to do with dynamic typing of Lisp as it has with the
>> > s-expression syntax.
>>
>> I don't think that syntax plays a role for domain-specific sublanguages.
>>
>> Sure, it's crucial for macros (you need a very regular syntax to make
>> writing macros easy enough), but you don't need macros for DSLs.
>
> Macros are just a shorthand, everything you could do with them you
> could do without them. So you *could* write your DSL without macros

Really? In a language which evaluates arguments to functions eagerly
-- how would I design new control structures without macros? I'm of
course only judging from a Scheme perspective, perhaps Lisp has
additional mechanisms that have been folded into macros in Scheme.

Regards -- Markus
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xfy32udqp.fsf@ruckus.brouhaha.com>
·····································@ANDTHATm-e-leypold.de (Markus E.L.) writes:
> Really? In a language which evaluates arguments to functions eagerly
> -- how would I design new control structures without macros? 

With first class functions and closures, of course.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <edhcnhpsur.fsf@hod.lan.m-e-leypold.de>
Paul Rubin wrote:

> ·····································@ANDTHATm-e-leypold.de (Markus E.L.) writes:
>> Really? In a language which evaluates arguments to functions eagerly
>> -- how would I design new control structures without macros? 
>
> With first class functions and closures, of course.


if (C ...) (A ...) (B ....)

would be strict in both (A) and (B). Since the purpose of (C) might be
to avoid evaluation of one of both to _|_, I don't see how first-class
functions and closures would allow you to write new control structures
in the general case.

As an example, assume you got an if-construct (obviously not with eagerly
evaluated arguments) - how would you use closures and first-class functions
to get a case-construct like

  case
     condition1  action1
     condition2  action2
     condition3  action3
     ...

(I leave the syntax to you).

I doubt it can work, but perhaps I misunderstood what you wanted to say.

Regards -- Markus
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xd4y5mzic.fsf@ruckus.brouhaha.com>
·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) writes:
> > With first class functions and closures, of course.
> if (C ...) (A ...) (B ....)
> 
> would be strict in both (A) and (B). Since the purpose of (C) might be
> to avoid evaluation of one of both to _|_, I don't see how class
> functions and closures would allow you to write new control structures
> in the general case.

You'd write something like: 

   if C: (funcall (lambda A)) 
   else: (funcall (lambda B))

Every so-called strict language has non-strict constructs.  The if
statement is an example of one.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <nr7iodo96s.fsf@hod.lan.m-e-leypold.de>
Paul Rubin wrote:

> ·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) writes:
>> > With first class functions and closures, of course.
>> if (C ...) (A ...) (B ....)
>> 
>> would be strict in both (A) and (B). Since the purpose of (C) might be
>> to avoid evaluation of one of both to _|_, I don't see how class
>> functions and closures would allow you to write new control structures
>> in the general case.
>
> You'd write something like: 
>
>    if C: (funcall (lambda A)) 
>    else: (funcall (lambda B))
>
> Every so-called strict language has non-strict constructs.  The if
> statement is an example of one.

Not quite. I don't want to write

  case 

     fun () -> condition1   =>   fun () => action1
     fun () -> condition2   =>   fun () => action2
     fun () -> condition3   =>   fun () => action3

We're talking about creating new control structures from existing
ones, that explicitly involves getting the syntax right to a certain
amount (though I admit your point: If one completely ignores
syntactical requirements and imposes the burden of writing lambdas
around parts of the control structure's arguments, one can get by w/o
macros or lazy evaluation. Though this actually means making a point
in favour of the pro-Lisp-faction: Writing almost everything by hand
doesn't mean having new control structures: Lisp is nicer there.).

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b26055$0$1612$ed2619ec@ptn-nntp-reader02.plus.net>
Markus E.L. 2 wrote:
> Not quite. I don't want to write
> 
>   case
> 
>      fun () -> condition1   =>   fun () => action1
>      fun () -> condition2   =>   fun () => action2
>      fun () -> condition3   =>   fun () => action3
> 
> We're talking about creating new control structures from existing
> ones, that explicitely involves getting the syntax right to a certain
> amount (though I admit your point: If one completely ignores
> syntactical requirements and imposes the burden of writing lambdas
> around parts of the control structure's arguments, one can get by w/o
> macros or lazy evaluation. Though this actually means making a point
> in favour of the pro-Lisp-faction: Writing almost everything by hand
> doesn't mean having new control structures:

This is exactly what Paul was saying: it can be done without macros. He
didn't say it was easy. :-)

> Lisp is nicer there.

I think you mean: Lisp is no better than the next language with macros.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <djvebxledr.fsf@hod.lan.m-e-leypold.de>
Jon Harrop wrote:

> Markus E.L. 2 wrote:
>> Not quite. I don't want to write
>> 
>>   case
>> 
>>      fun () -> condition1   =>   fun () => action1
>>      fun () -> condition2   =>   fun () => action2
>>      fun () -> condition3   =>   fun () => action3
>> 
>> We're talking about creating new control structures from existing
>> ones, that explicitely involves getting the syntax right to a certain
>> amount (though I admit your point: If one completely ignores
>> syntactical requirements and imposes the burden of writing lambdas
>> around parts of the control structure's arguments, one can get by w/o
>> macros or lazy evaluation. Though this actually means making a point
>> in favour of the pro-Lisp-faction: Writing almost everything by hand
>> doesn't mean having new control structures:
>
> This is exactly what Paul was saying: it can be done without macros. He
> didn't say it was easy. :-)
>
>> Lisp is nicer there.
>
> I think you mean: Lisp is no better than the next language with macros.

Exactly that. :-) I mean: Languages with macros are nicer there and
Pauls suggestion to use first-class functions and closures of course allows
one to achieve the same effect writing out laziness manually, but
doesn't cut the mustard syntactically (syntax-wise? :-) I read
the wrong literature).

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b2673a$0$1597$ed2619ec@ptn-nntp-reader02.plus.net>
Markus E.L. 2 wrote:
> Exactly that. :-) I mean: Languages with macros are nicer there and
> Pauls suggestion to use class function and closure of course allows
> one to achieve the same effect writing out lazyness manually, but
> doesn't cut the mustard syntactically (syntactically-wise? :-) I read
> the wrong literature).

Yes. However, macros are not entirely peachy. Every time someone pulls in a
macro they're forking the language and making it slightly more difficult
for other people to grok their new syntax. This is why overuse of macros is
discouraged in any language. You may also have problems with interactions
between macros.

I would rather the language bundled all common syntactic constructs and the
use of macros was kept to an absolute minimum. For example, I'd like infix
symbols as type constructors so they can be used in pattern matches (as ::
already is).

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <u7bqdpjy7r.fsf@hod.lan.m-e-leypold.de>
Jon Harrop wrote:

> Markus E.L. 2 wrote:
>> Exactly that. :-) I mean: Languages with macros are nicer there and
>> Pauls suggestion to use class function and closure of course allows
>> one to achieve the same effect writing out lazyness manually, but
>> doesn't cut the mustard syntactically (syntactically-wise? :-) I read
>> the wrong literature).
>
> Yes. However, macros are not entirely peachy. Every time someone pulls in a
> macro they're forking the language and making it slightly more difficult
> for other people to grok their new syntax. This is why overuse of macros is
> discouraged in any language. You may also have problems with interactions
> between macros.

Fortunately hygienic macros have been invented for that. I find the
macro system in scheme and the way it is used to extend the language
from a minimal core to the standard level really really
impressive. Still ... macros have such a high potential for abuse.

>
> I would rather the language bundled all common syntactic constructs and the
> use of macros was kept to an absolute minimum. For example, I'd like infix
> symbols as type constructors so they can be used in pattern matches (as ::
> already is).

I'd support this. :-).

Regards -- Markus
From: Stefan Nobis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m2lkcscx97.fsf@snobis.de>
·····································@ANDTHATm-e-leypold.de (Markus
E.L. 2) writes:

> Still ... macros have such a high potential for abuse.

Hmmm... exactly which language feature has no high potential for
abuse? There are things I've seen in Java code, that nobody should
ever have seen.

BTW: Why are macros forking the language or are hard to understand? I
just had to learn the .Net library -- no macros but I still don't
understand code fragments that use library parts still unknown to me.

Macros and libraries always create some kind of sub language that you
have to learn. I don't see much difference...

-- 
Stefan.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-D15768.20184503082007@news-europe.giganews.com>
In article <··············@snobis.de>, Stefan Nobis <······@gmx.de> 
wrote:

> ·····································@ANDTHATm-e-leypold.de (Markus
> E.L. 2) writes:
> 
> > Still ... macros have such a high potential for abuse.
> 
> Hmmm... exactly which language feature has no high potential for
> abuse? There are things I've seen in Java code, that nobody should
> ever have seen.
> 
> BTW: Why are macros forking the language or are hard to understand? I
> just had to learn the .Net library -- no macros but I still don't
> understand code fragments that use library parts still unknown to me.
> 
> Macros and libraries always create some kind of sub language that you
> have to lean. I don't see much differnce...


1st rule:
 avoid macros and use functions.

2nd rule:
  if you really need macros, use functions.

3rd rule:
  if you really really really need a macro, then
  write the macro and then try to replace it
  with a function.

4th rule:
  if you write a macro, write a good one.


Functions are usually much easier to debug.
There are also quite a few traps with macro
programming.

Macros are handy, but it takes a bit experience
to develop them and write quality code.

-- 
http://lispm.dyndns.org
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <xntzrfzgku.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <··············@snobis.de>, Stefan Nobis <······@gmx.de> 
> wrote:
>
>> ·····································@ANDTHATm-e-leypold.de (Markus
>> E.L. 2) writes:
>> 
>> > Still ... macros have such a high potential for abuse.
>> 
>> Hmmm... exactly which language feature has no high potential for
>> abuse? There are things I've seen in Java code, that nobody should
>> ever have seen.
>> 
>> BTW: Why are macros forking the language or are hard to understand? I
>> just had to learn the .Net library -- no macros but I still don't
>> understand code fragments that use library parts still unknown to me.
>> 
>> Macros and libraries always create some kind of sub language that you
>> have to lean. I don't see much differnce...
>
>
> 1st rule:
>  avoid macros and use functions.
>
> 2nd rule:
>   if you really need macros, use functions.
>
> 3rd rule:
>   if you really really really need a macro, then
>   write the macro and then try to replace it
>   with a function.
>
> 4th rule:
>   if you write a macro, write a good one.
>
>
> Functions are usually much easier to debug.
> There are also quite a few traps with macro
> programming.
>
> Macros are handy, but it takes a bit experience
> to develop them and write quality code.

:-).

Rainer, I'd absolutely agree with that and could not have said it
better. I'd like to add that my experience is that language novices
(myself included) are usually so much fascinated by the possibilities
they offer to build custom syntax that they (and I'd almost say:
always) use them much too early. "Macros are harmful. Don't use them
if you do not have at least XXX code lines of experience" should be
written hugely as a warning into every text book on a language that
has macros (or they should just quote your 4 rules).

Regards -- Markus
From: =?utf-8?b?R2lzbGUgU8ODwqZsZW5zbWk=?= =?utf-8?b?bmRl?=
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <0nsl6vv82r.fsf@kaktus.ii.uib.no>
Rainer Joswig <······@lisp.de> writes:

> > 
> > Macros and libraries always create some kind of sub language that you
> > have to lean. I don't see much differnce...
> 
> 
> 1st rule:
>  avoid macros and use functions.
> 
> 2nd rule:
>   if you really need macros, use functions.
> 
> 3rd rule:
>   if you really really really need a macro, then
>   write the macro and then try to replace it
>   with a function.
> 
> 4th rule:
>   if you write a macro, write a good one.
> 

While macros are harder to understand and debug than functions, and should
be used sparingly for that reason, they can in many cases make it easier
to get your problem right. This is particularly the case for with- style
macros like with-open-file, that ensure that things are properly cleaned up.
I have often done that with C-bindings that require you to allocate a 
structure before use, and to close it after you finish. Of course instead of
with-open-file you could have implemented it (or other with-style functions)
as something like:

(defun call-with-open-file (path func &key (direction :input) &rest rest)
  (let ((stream (open path :direction direction)))
    (unwind-protect
      (funcall func stream rest)
      (close stream))))


But I don't think that would improve the readability or make debugging easier
than using the with- macro. It is however a good thing if you use some of the
established idioms for macros (like with- and do- macros, several of them covered
in "on lisp"). On the other hand, if macros are used everywhere in the code
in unusual ways, that can really make the code unmaintainable. I think your
advice could make sense in the macro chapter in an introductory text to CL
still. 

-- 
Gisle Sælensminde, Phd student, Scientific programmer
Computational biology unit, BCCS, University of Bergen, Norway, 
Email: ·····@cbu.uib.no
The best way to travel is by means of imagination
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-C69913.15004507082007@news-europe.giganews.com>
In article <··············@kaktus.ii.uib.no>,
 ·····@kaktus.ii.uib.no (Gisle Sælensminde) wrote:

> Rainer Joswig <······@lisp.de> writes:
> 
> > > 
> > > Macros and libraries always create some kind of sub language that you
> > > have to lean. I don't see much differnce...
> > 
> > 
> > 1st rule:
> >  avoid macros and use functions.
> > 
> > 2nd rule:
> >   if you really need macros, use functions.
> > 
> > 3rd rule:
> >   if you really really really need a macro, then
> >   write the macro and then try to replace it
> >   with a function.
> > 
> > 4th rule:
> >   if you write a macro, write a good one.
> > 
> 
> While macros are harder to understand and debug than functions, and should
> be used sparingly for that reason, they can in many cases make it easier
> to get your problem right. This is in particularly the case for with- style
> macros like with-open-file, that ensure that things are properly cleaned up.
> I have often done that with C-bindings that require you to allocate a 
> structure before use, and to close it after you finish. Of cause instead of
> with-open-file you could have implemented it (or other with-style functions)
> as something like:
> 
> (defun call-with-open-file (path func &key (direction :input) &rest rest)
>   (let ((stream (open path :direction direction)))
>     (unwind-protect
>       (funcall func stream rest)
>       (close file))))
> 
> 
> But I don't think that would improve the readability or make debugging easier
> than using the with- macro.

The advantage is that you see that form in the debugger on the stack.
The macro is gone at runtime. All you see is the generated code running.

Sometimes you see that a with- macro expands into something like
the above code.

> It is however a good thing if you use some of the
> estabished idioms for macros (like with- and do- macros , several of them covered
> in "on lisp"). On the other hand, if macros are used everywhere in the code
> in unusual ways, that can really make the code unmaintainable. I think your
> advice could make sense in the macro chapter in an introductury text to CL
> still.

-- 
http://lispm.dyndns.org
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9b0c8$333$1@registered.motzarella.org>
Markus E.L. 2 schrieb:

> Fortunately hygienic macros have been invented for that. I find the
> macro system in scheme and the way it is used to extend the language
> from a minimal core to the standard level really really
> impressive. Still ... macros have such a high potential for abuse.

As soon you are having a Lisp dialect like Common Lisp or Scheme you are
at home :-)
Which one you take is really mostly a matter of taste.
In Lisp you can make define and define functions as you do in Scheme.
In Scheme you can make defun and define functions as you do in CL.
You can also use whatever macro system you like.
If you don't like the one that comes with Lisp, sit down one hour or even
three days and make one that is better for the task at hand.


>> I would rather the language bundled all common syntactic constructs and the
>> use of macros was kept to an absolute minimum. For example, I'd like infix
>> symbols as type constructors so they can be used in pattern matches (as ::
>> already is).
> 
> I'd support this. :-).

Write it.
You could read Practical Common Lisp in one weekend and begin to read On 
Lisp. After only a few days you can write your own pattern matcher. And
you can use something like :: as well. If you insist on having :: you would
have to play with reader macros, as :: already has a meaning for some
other case. But lets take ~ instead.
If you like simple auto destruction for lists do this:
(defun (f~r)
   (foo f (bar r)))

Instead of
(defun (list)
   (foo (first list) (bar (rest list))))

To all Lisp newbies: the code I presented does not run this way.
I wanted to motivate Markus to take a few hours of nearly no Lisp experience
and make this work, by defining a macro on top of defun, that
expands into defun.


André
-- 
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b25b53$0$1602$ed2619ec@ptn-nntp-reader02.plus.net>
Markus E.L. 2 wrote:
> Paul Rubin wrote:
>> With first class functions and closures, of course.
> 
> if (C ...) (A ...) (B ....)
> 
> would be strict in both (A) and (B). Since the purpose of (C) might be
> to avoid evaluation of one of both to _|_, I don't see how class
> functions and closures would allow you to write new control structures
> in the general case.

You defer the computations either by wrapping them in an anonymous function
or by making them lazy.

Mathematica has an extended If expression, for example:

  If[True, a, b, c] -> a
  If[False, a, b, c] -> b
  If[AnythingElse, a, b, c] -> c

You can write this in OCaml as:

# let if3 p a b c =
    Lazy.force
      (match p with
       | `True -> a
       | `False -> b
       | _ -> c);;
val if3 : [> `False | `True ] -> 'a Lazy.t -> 'a Lazy.t -> 'a Lazy.t -> 'a =
  <fun>

then you can use it like this:

# if3 `Foo
    (lazy(print_endline "a"))
    (lazy(print_endline "b"))
    (lazy(print_endline "c"));;
c
- : unit = ()

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <9b4pjhmt9v.fsf@hod.lan.m-e-leypold.de>
Jon Harrop wrote:

> Markus E.L. 2 wrote:
>> Paul Rubin wrote:
>>> With first class functions and closures, of course.
>> 
>> if (C ...) (A ...) (B ....)
>> 
>> would be strict in both (A) and (B). Since the purpose of (C) might be
>> to avoid evaluation of one of both to _|_, I don't see how class
>> functions and closures would allow you to write new control structures
>> in the general case.
>
> You defer the computations either by wrapping them in an anonymous function
> or by making them lazy.

I know that :-). It's the explicitness that makes this solution
inferior in my eyes (compared to one with macros which does the
wrapping under the hood) :-).

On the other side Markus' first rule of programming is "Don't be
tempted by macros processors" and the second "No, don't do it". Domain
specific extensions on the syntactic level are seldom appropriate in
my eyes. Macro systems are for language designers, only rarely for
application programmers.

Regards -- Markus



>
> Mathematica has an extended If expression, for example:
>
>   If[True, a, b, c] -> a
>   If[False, a, b, c] -> b
>   If[AnythingElse, a, b, c] -> c
>
> You can write this in OCaml as:
>
> # let if3 p a b c =
>     Lazy.force
>       (match p with
>        | `True -> a
>        | `False -> b
>        | _ -> c);;
> val if3 : [> `False | `True ] -> 'a Lazy.t -> 'a Lazy.t -> 'a Lazy.t -> 'a =
>   <fun>
>
> then you can use it like this:
>
> # if3 `Foo
>     (lazy(print_endline "a"))
>     (lazy(print_endline "b"))
>     (lazy(print_endline "c"));;
> c
> - : unit = ()
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9d9ll$6i3$1@registered.motzarella.org>
Markus E.L. 2 schrieb:
> Jon Harrop wrote:
> 
>> Markus E.L. 2 wrote:
>>> Paul Rubin wrote:
>>>> With first class functions and closures, of course.
>>> if (C ...) (A ...) (B ....)
>>>
>>> would be strict in both (A) and (B). Since the purpose of (C) might be
>>> to avoid evaluation of one of both to _|_, I don't see how class
>>> functions and closures would allow you to write new control structures
>>> in the general case.
>> You defer the computations either by wrapping them in an anonymous function
>> or by making them lazy.
> 
> I know that :-). It's the ecplicitness that makes this solution
> inferior in my eyes (compared to one with macros which does the
> wrapping under the hood) :-).

Yes, you are right.
In fact, it is unacceptable. If someone finds

 >> # if3 `Foo
 >>     (lazy(print_endline "a"))
 >>     (lazy(print_endline "b"))
 >>     (lazy(print_endline "c"));;

acceptable then I suggest this person build her own IF function.
So instead of
if cond then exp1 else exp2
this person would say
myIf cond (lazy exp1) (lazy exp2)

How would the if3 use look like if you want to have several side
effects?
(lazy(print 1; logging 2; print 3;))  ?


No one would ever write such a beast.
One could of course do exactly the same in Lisp, but such a construct
is usually better expressed in a macro. Let MIF be mathematica-if:

(mif t a b c)   => a
(mif nil a b c) => b
(mif anything-else a b c) => c


> On the other side Markus' first rule of programming is "Don't be
> tempted by macros processors" and the second "No, don't do it". Domain
> specific extensions on the syntactic level are seldom appropriate in
> my eyes. Makro systems are for language designers, only rarely for
> application prgrammers.

I can't agree with that, and this is why:
if a non-expert is writing programs it will end up in a kind of a mess 
anyway.
The language does not matter anymore, because this non-expert will write
BS in Lisp as well as in Mercury, PHP or D.
Whatever language the user has, if he is not experienced enough he will
fail to some degree - macros are not needed for that.

If a user on the other hand is experienced with programming in general
and with Lisp in particular, then macros are a great tool, like the
others. Life is dangerous, not programming or macros :-)


André
-- 
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8tnvp$j83$2@online.de>
Markus E.L. schrieb:
>> On Aug 1, 1:05 pm, Joachim Durchholz <····@durchholz.org> wrote:
>>> Sure, it's crucial for macros (you need a very regular syntax to make
>>> writing macros easy enough), but you don't need macros for DSLs.
>> Macros are just a shorthand, everything you could do with them you
>> could do without them. So you *could* write your DSL without macros
> 
> Really? In a language which evaluates arguments to functions eagerly
> -- how would I design new control structures without macros?

I'd store the code in the form of closures.
(Currying *might* be the extra mechanism to make this work out well 
enough to be useful in practice. Just hypothesizing though.)

Regards,
Jo
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8q93q$n26$1@online.de>
Slobodan Blazeski schrieb:
> 
> Macros are just a shorthand, everything you could do with them you
> could do without them.

Sure, just as with any construct of any programming language.

 > So you *could* write your DSL without macros
> like (calculate-approximate-military-budget 2007) but they could save
> you from writing a lot of boileplate code and help you catch a lot of
> patterns. Macros  could make your code more concise,  they could
> generate functions that elsewhere you *must* write by hand, becoming a
> so-called human compiler.

How does that differ from simply wrapping stuff in a function?

 > If there's a tool that could make your life
> easier why not using it. Just look at Paul Graham's book On Lisp
> freely available at http://www.paulgraham.com/onlisptext.html  . 


 > If
> you don't find anything that could help your work becoming more easy
> than maybe your domain and/or personality is not lisp-friendly.

Well, the positive thing about Lisp was that fiddling around with the 
MAP functions in my first Lisp weeks was an incredible a-ha experience. 
Lisp soon lost all credit when the Lisp systems that I encountered were 
bug-ridden, slow, and the Lisp programs written were difficult to 
understand.
Much of these problems had to do with shallow binding, and I think that 
fad has gone for good. Another issue was with lack of static typing; I 
understand that you can have static typing in Lisp where you need it, so 
things have improved here, but I doubt that this will work well with, 
say, 3rd-party libraries. (I like to guess what a function does from its 
types; in fact, there's a "theorems for free" school that has 
demonstrated that a surprisingly large portion of a function's semantics 
can be inferred from its types alone if the function is known to be pure.)
I think that syntactic sugar is very often a sign of inelegant language 
design. Macros are a way of doing syntactic sugar, so heavy reliance on 
macro mechanisms makes me predisposed to concluding that the language 
isn't expressive enough wrt. building abstractions.
I also think that while Lisp had a very lean-and-mean syntax initially 
(just S-expressions), it lost most of that by putting all that syntactic 
sugar back in through macros. Sure, it's still all S-expressions, but 
macro names are essentially what keywords are in other languages: 
boilerplate that shouldn't be necessary.

Five years ago, I'd have said "well, you can't work without quite some 
amount of boilerplate keywords". Then I got to know Haskell, and now I
think that boilerplate keywords should be restricted.

Just my 2c, to illustrate the perspective from which I'm eyeing Lisp: 
sympathetic with the general concept, but it seems to have become too 
baroque for my taste. Plus, it has empowered the programmer (lots of 
powerful commands) without empowering the maintainer (few guarantees 
about what a call does, you need to do whole-system analysis to exclude 
that, say, some code you call doesn't overwrite your local variables).

I know much of this isn't relevant to many Lispers, so YMMV :-)

Regards,
Jo
From: Dan Bensen
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8qlap$jnf$1@wildfire.prairienet.org>
Joachim Durchholz wrote:
 > Slobodan Blazeski schrieb:
 >> Macros  could make your code more concise,  they could
 >> generate functions that elsewhere you *must* write by hand,
 >> becoming a so-called human compiler.

 > How does that differ from simply wrapping stuff in a function?

Introducing a global definition is an action, not a function.
It sounds like that's what Slobodan was referring to.

 > I think that syntactic sugar is very often a sign of inelegant
 > language design. Macros are a way of doing syntactic sugar,
 > so heavy reliance on macro mechanisms makes me predisposed
 > to concluding that the language isn't expressive enough wrt.
 > building abstractions.

This doesn't make sense.  Macros are an expressive way of building
abstractions.  What's your idea of a sufficiently expressive way?

 > I also think that while Lisp had a very lean-and-mean syntax initially
 > (just S-expressions), it lost most of that by putting all that
 > syntactic sugar back in through macros. Sure, it's still all
 > S-expressions, but macro names are essentially what keywords are
 > in other languages: boilerplate that shouldn't be necessary.

Nonsense.  You can't match an expression to a pattern without
the "match" keyword, and you can't use a HOF without calling it.
How can the computer do any computation without your giving it
some idea of what you want?

 > it has empowered the programmer (lots of powerful commands)
 > without empowering the maintainer (few guarantees about what
 > a call does, you need to do whole-system analysis to exclude that,
 > say, some code you call doesn't overwrite your local variables).

Not necessarily.  If you look at the called code and see that
it doesn't mess with any extra variables, then it's okay.

-- 
Dan
www.prairienet.org/~dsb/
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-696F6E.17484301082007@news-europe.giganews.com>
In article <············@online.de>,
 Joachim Durchholz <··@durchholz.org> wrote:

> Slobodan Blazeski schrieb:
> > 
> > Macros are just a shorthand, everything you could do with them you
> > could do without them.
> 
> Sure, just as with any construct of any programming language.
> 
>  > So you *could* write your DSL without macros
> > like (calculate-approximate-military-budget 2007) but they could save
> > you from writing a lot of boileplate code and help you catch a lot of
> > patterns. Macros  could make your code more concise,  they could
> > generate functions that elsewhere you *must* write by hand, becoming a
> > so-called human compiler.
> 
> How does that differ from simply wrapping stuff in a function?

The biggest difference is that macros allow arbitrary computation
at the time they are expanded. For example when the
compiler compiles code, the macros are executed in
the compile time environment. The compiler itself
is running in a Lisp environment. So you can
express code transformations that happen at compile time.

It allows you to generate source that would be
difficult to write manually. The source generation
then happens at compile time.

Another typical use is that the development environment is
a Lisp environment, which happens to host the compiler.
Macros then allow you to execute code in the development
environment during compilation. Top-level macros
like DEFUN and others can record definitions,
arglists, source location and other information via
compile time side effects. The compiler needs to know
less about the development environment - that can be
put into the defining macros.

 
>  > If there's a tool that could make your life
> > easier why not using it. Just look at Paul Graham's book On Lisp
> > freely available at http://www.paulgraham.com/onlisptext.html  . 
> 
> 
...

-- 
http://lispm.dyndns.org
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186068891.801657.183360@e9g2000prf.googlegroups.com>
On Aug 1, 8:33 am, Joachim Durchholz <····@durchholz.org> wrote:
> How does that differ from simply wrapping stuff in a function?

Every semi-popular language has syntactic constructs that aren't
"necessary" but nevertheless are quite useful.  (The fundamental subset
for languages with first class functions is smaller than the subset
for other languages.)

One might believe that a general purpose programming language has
every interesting syntactic construct, but that would make said
language very different from natural languages.  After all, part of
becoming a domain expert is learning how to speak the language - in
other words, there are DSLs in natural language as well.  And, it's
not just noun and verb definitions - it's syntax as well.

Every significant application that I've seen has had syntactic
patterns that weren't concisely expressible in the relevant
programming language.  The "repeat the boilerplate" approach always
led to bugs.

Considering that we insist on the ability to use functions to avoid
certain kinds of "repeat the boilerplate", I'm surprised that folks
insist on "repeat the boilerplate" for syntax.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8tnr5$j83$1@online.de>
Andy Freeman schrieb:
> Every significant application that I've seen has had syntactic
> patterns that weren't concisely expressable in the relevant
> programming language.

OK, now that's interesting, since I've been asserting that this is 
exactly the kind of trouble you don't get with a "roughly Haskellish 
syntax plus HOFs" (*very* roughly speaking).
I'd be very interested in collecting counterexamples to that position.

Regards,
Jo
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186099153.456720.168080@z24g2000prh.googlegroups.com>
On Aug 2, 4:03 pm, Joachim Durchholz <····@durchholz.org> wrote:
> Andy Freeman schrieb:
> > Every significant application that I've seen has had syntactic
> > patterns that weren't concisely expressable in the relevant
> > programming language.
>
> OK, now that's interesting, since I've been asserting that this is
> exactly the kind of trouble you don't get with a "roughly Haskellish
> syntax plus HOFs" (*very* roughly speaking).
> I'd be very interested in collecting counterexamples to that position.

Look at any of your largish applications.  I'll bet that if you step
back a bit, you'll see similar code fragments in multiple places.  You
had haskellish syntax and HOFs and you didn't exploit the opportunity
to abstract and reuse.

Repetition is good in some cases.  But, it would be quite curious if
"good repetition", which is domain- and application- specific,
happened to correspond exactly to what was convenient in haskell.

Folks don't abstract and reuse when the cost is higher than the
benefit, so reducing the cost allows folks to abstract and reuse more
often.

Syntax is important - it affects cost.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f93300$817$1@online.de>
Andy Freeman schrieb:
> On Aug 2, 4:03 pm, Joachim Durchholz <····@durchholz.org> wrote:
>> Andy Freeman schrieb:
>>> Every significant application that I've seen has had syntactic
>>> patterns that weren't concisely expressable in the relevant
>>> programming language.
>> OK, now that's interesting, since I've been asserting that this is
>> exactly the kind of trouble you don't get with a "roughly Haskellish
>> syntax plus HOFs" (*very* roughly speaking).
>> I'd be very interested in collecting counterexamples to that position.
> 
> Look at any of your largish applications.  I'll bet that if you step
> back a bit, you'll see similar code fragments in multiple places.

If that were the case, I'd never have taken the stance that 
simple-syntax/HOFs were adequate.
So, no, I don't see this kind of repetition.

I do see repetition in those cases where factoring out commonalities 
wasn't worth the effort, or where I wasn't sure how to best factor out.
I have yet to encounter something that's screaming "macrofy me!" at me.

> Repetition is good in some cases.  But, it would be quite curious if
> "good repetition", which is domain- and application- specific,
> happened to correspond exactly to what was convenient in haskell.

At that level of discussion, one could equally well say that "it's 
unlikely that 'good repetition' will happen to correspond exactly to 
what is convenient in Lisp", so please be a bit more specific.

> Folks don't abstract and reuse when the cost is higher than the
> benefit, so reducing the cost allows folks to abstract and reuse more
> often.
> 
> Syntax is important - it affects cost.

Agreed.

Regards,
Jo
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186517905.273083.142310@z24g2000prh.googlegroups.com>
On Aug 4, 4:44 pm, Joachim Durchholz <····@durchholz.org> wrote:
> Andy Freeman schrieb:
> > Look at any of your largish applications.  I'll bet that if you step
> > back a bit, you'll see similar code fragments in multiple places.
>
> I do see repetition in those cases where factoring out commonalities
> wasn't worth the effort, or where I wasn't sure how to best factor out.
> I have yet to encounter something that's screaming "macrofy me!" at me.

"Wasn't worth the effort" says that the cost was more than the
benefit.  It doesn't say that there wasn't a benefit.  Some things are
awkward to do with HOFs, so you don't do them.

> > Repetition is good in some cases.  But, it would be quite curious if
> > "good repetition", which is domain- and application- specific,
> > happened to correspond exactly to what was convenient in haskell.
>
> At that level of discussion, one could equally well say that "it's
> unlikely that 'good repetition' will happen to correspond exactly to
> what is convenient in Lisp", so please be a bit more specific.

The value of macros is that if a given form of syntactic repetition is
not good, I can abstract it out.

Yes, one can use HOFs to do lots of things with standard control
structures, but that requires defining said HOFs and writing instances
of said standard control structures.  There's value in automating
those things, in providing a shorthand.

-andy
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9c2tg$ebg$1@online.de>
Andy Freeman schrieb:
> On Aug 4, 4:44 pm, Joachim Durchholz <····@durchholz.org> wrote:
>> Andy Freeman schrieb:
>>> Look at any of your largish applications.  I'll bet that if you step
>>> back a bit, you'll see similar code fragments in multiple places.
>> I do see repetition in those cases where factoring out commonalities
>> wasn't worth the effort, or where I wasn't sure how to best factor out.
>> I have yet to encounter something that's screaming "macrofy me!" at me.
> 
> "Wasn't worth the effort" says that the cost was more than the
> benefit.

Sorry, I meant to say "wasn't worth thinking about".

 > It doesn't say that there wasn't a benefit.  Some things are
> awkward to do with HOFs, so you don't do them.

What kind of things?

>>> Repetition is good in some cases.  But, it would be quite curious if
>>> "good repetition", which is domain- and application- specific,
>>> happened to correspond exactly to what was convenient in haskell.
>> At that level of discussion, one could equally well say that "it's
>> unlikely that 'good repetition' will happen to correspond exactly to
>> what is convenient in Lisp", so please be a bit more specific.
> 
> The value of macros is that if a given form of syntactic repetition is
> not good, I can abstract it out.
> 
> Yes, one can use HOFs to do lots of things with standard control
> structures, but that requires defining said HOFs and writing instances
> of said standard control structures.

Now under a macro regime, abstracting stuff out requires defining the 
macro and writing calls to it, so this aspect is still the same.

Where do macros *differ* from HOFs?

Regards,
Jo
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186600769.151142.109390@q3g2000prf.googlegroups.com>
On Aug 8, 2:38 am, Joachim Durchholz <····@durchholz.org> wrote:
> Andy Freeman schrieb:
> > "Wasn't worth the effort" says that the cost was more than the
> > benefit.
>
> Sorry, I meant to say "wasn't worth thinking about".

Of course it wasn't worth thinking about - you couldn't do anything
about it even if you did think about it.

> > The value of macros is that if a given form of syntactic repetition is
> > not good, I can abstract it out.
>
> > Yes, one can use HOFs to do lots of things with standard control
> > structures, but that requires defining said HOFs and writing instances
> > of said standard control structures.
>
> Now under a macro regime, abstracting stuff out requires defining the
> macro and writing calls to it, so this aspect is still the same.

Without the macro, every time you want to use the pattern, you have to
correctly define the relevant HOFs and use the relevant control
structures.

With the macro, you just use the macro "every time", which is, by
definition, clearer than defining the HOFs and correctly using the
relevant control structures.  That's the benefit.  The cost is that
you have to define the macro, which is a one-time thing.

Yes, the macro use is "by definition" better than the inline HOF
definitions and use of control structures because if it isn't better
for the case/pattern of interest, you shouldn't be using a macro for
that case.  (The existence of macros isn't an obligation to use them
any more than the existence of HOFs obligates you to use them
everywhere.)  In fact, the savings over all uses has to be greater
than the cost of defining the macro.

> Where do macros *differ* from HOFs?

Macros are code that writes code.  The written code can include
definitions, instances of control structures, etc, that I'd otherwise
have to do by hand each and every time I wanted to use the
abstraction.

HOFs are "merely" functions that operate on functions.

Macros are like a subroutine (and HOFs) in that they let me think/work
at a higher level.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bkbb3nephmaf9@corp.supernews.com>
Andy Freeman wrote:
> On Aug 8, 2:38 am, Joachim Durchholz <····@durchholz.org> wrote:
>> Now under a macro regime, abstracting stuff out requires defining the
>> macro and writing calls to it, so this aspect is still the same.
> 
> Without the macro, every time you want to use the pattern, you have to
> correctly define the relevant HOFs and use the relevant control
> structures.

You also define the HOF once as Jo said, so there is no difference.

> Macros are code that writes code.  The written code can include
> definitions, instances of control structures, etc, that I'd otherwise
> have to do by hand each and every time I wanted to use the
> abstraction.

Can you give an example? I can't think of anything a macro can factor that
other constructs cannot...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <fKrui.42396$A5.1540296@phobos.telenet-ops.be>
Jon Harrop wrote:
> Andy Freeman wrote:
>> On Aug 8, 2:38 am, Joachim Durchholz <····@durchholz.org> wrote:
>>> Now under a macro regime, abstracting stuff out requires defining the
>>> macro and writing calls to it, so this aspect is still the same.
>> Without the macro, every time you want to use the pattern, you have to
>> correctly define the relevant HOFs and use the relevant control
>> structures.
> 
> You also define the HOF once as Jo said, so there is no difference.
> 
>> Macros are code that writes code.  The written code can include
>> definitions, instances of control structures, etc, that I'd otherwise
>> have to do by hand each and every time I wanted to use the
>> abstraction.
> 
> Can you give an example? I can't think of anything a macro can factor that
> other constructs cannot...
> 

You're pretty close to the Turing argument there.

Think about the defclass macro for instance, that's not really the same 
thing as a higher order function. Then you have cl-sql which defines a 
new kind of defclass for those objects coming from a database, adding 
properties specific to databases, and creating classes which keep track 
of dirty status. Ken Tilton's cells relies on such tweaked defclass too 
i think, defining dynamic slots which update their state automatically.

Or let's imagine a new kind of defun which would log the function name 
and the parameters on each invocation. Sure you could do it with a 
higher-order function, but a macro just seems easier: I don't need to 
type the name of the function a second time. I think I've seen a 
prevalence implementation using such a construct.

Sacha
From: Philippa Cowderoy
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <Pine.WNT.4.64.0708082356240.1568@sleek>
On Wed, 8 Aug 2007, Jon Harrop wrote:

> Andy Freeman wrote:
> > Macros are code that writes code.  The written code can include
> > definitions, instances of control structures, etc, that I'd otherwise
> > have to do by hand each and every time I wanted to use the
> > abstraction.
> 
> Can you give an example? I can't think of anything a macro can factor that
> other constructs cannot...
> 

Bindings'd be the one I usually miss. I don't think I've seen a language 
that can create something similar to Haskell's do statements without 
syntactic manipulation for example - and monads aren't the only structure 
that might want that kind of sugar.

-- 
······@flippac.org

'In Ankh-Morpork even the shit have a street to itself...
 Truly this is a land of opportunity.' - Detritus, Men at Arms
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bmvb766m7gma5@corp.supernews.com>
Philippa Cowderoy wrote:
> Bindings'd be the one I usually miss. I don't think I've seen a language
> that can create something similar to Haskell's do statements without
> syntactic manipulation for example - and monads aren't the only structure
> that might want that kind of sugar.

I see, interesting.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186682772.670001.275220@q3g2000prf.googlegroups.com>
On Aug 8, 1:45 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> Andy Freeman wrote:
> > On Aug 8, 2:38 am, Joachim Durchholz <····@durchholz.org> wrote:
> >> Now under a macro regime, abstracting stuff out requires defining the
> >> macro and writing calls to it, so this aspect is still the same.
>
> > Without the macro, every time you want to use the pattern, you have to
> > correctly define the relevant HOFs and use the relevant control
> > structures.
>
> You also define the HOF once as Jo said, so there is no difference.

I have to write the macro; I do that once.  Each time the macro is
used, it defines an HOF.  That happens multiple times.

Without the macro, I have to define the HOF multiple times.

Since multiple is not once, there is a difference.

> > Macros are code that writes code.  The written code can include
> > definitions, instances of control structures, etc, that I'd otherwise
> > have to do by hand each and every time I wanted to use the
> > abstraction.
>
> Can you give an example? I can't think of anything a macro can factor that
> other constructs cannot...

Suppose that OCaml didn't have something semantically equivalent to
C's ?:.  It would be easy enough to write a functional that does the
right thing, but the call is problematic because you don't want to
evaluate all of the arguments.  So, you'd have to call it with
closures or some sort of lazy construct.

Or, consider python 2.6's with statement.  (http://docs.python.org/ref/
with.html)  Basically, it guarantees that a block of code is executed
inside a specific environment (and provides a reference to parts of
that environment) with some cleanup when that block exits.  Yes, one
can write the required code every place one wants to use that idiom,
and it would be easy enough to define a functional version and call it
with closures, but it's simpler to just have syntax.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bmvis2mdngoa7@corp.supernews.com>
Andy Freeman wrote:
> I have to write the macro; I do that once.  Each time the macro is
> used, it defines an HOF.  That happens multiple times.
> 
> Without the macro, I have to define the HOF multiple times.
>
> Since multiple is not once, there is a difference.

When can you not factor your HOFs into another HOF or functor? (Except for
Philippa's example of adding bindings).

>> Can you give an example? I can't think of anything a macro can factor
>> that other constructs cannot...
> 
> Suppose that OCaml didn't have something semantically equivalent to
> C's ?:.  It would be easy enough to write a functional that does the
> right thing, but the call is problematic because you don't want to
> evaluate all of the arguments.  So, you'd have to call it with
> closures or some sort of lazy construct.

Yes.

> Or, consider python 2.6's with statement.  (http://docs.python.org/ref/
> with.html)  Basically, it guarantees that a block of code is executed
> inside a specific environment (and provides a reference to parts of
> that environment) with some cleanup when that block exits.  Yes, one
> can write the required code every place one wants to use that idiom,
> and it would be easy enough to define a functional version and call it
> with closures, but it's simpler to just have syntax.

There are certainly trade-offs involved though. Introducing syntax is
forking the language. Your syntax might clash with another syntax, making
it difficult for people to use both and so on. I am not sure it is a good
idea to always reach for a macro.

I face exactly this dilemma with OCaml because it lacks try..finally. For
the time being, I just use a try_finally function and wrap things in
closures. With currying, there is typically zero overhead. There are other
OCaml-specific trade-offs in favour of not-macros as well. If I do go the
macro route I think I will probably make more substantial alterations to
the lexer and parser as well.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186704799.727395.125330@q3g2000prf.googlegroups.com>
On Aug 9, 1:42 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> Andy Freeman wrote:
> > I have to write the macro; I do that once.  Each time the macro is
> > used, it defines an HOF.  That happens multiple times.
>
> > Without the macro, I have to define the HOF multiple times.
>
> > Since multiple is not once, there is a difference.
>
> When can you not factor your HOFs into another HOF or functor? (Except for
> Philippa's example of adding bindings).

The HOFs in this case are entirely determined by the use-site.  The
pattern in which they are used can be abstracted, that's what the
macro does, but the HOFs can't be.

> > Suppose that OCaml didn't have something semantically equivalent to
> > C's ?:.  It would be easy enough to write a functional that does the
> > right thing, but the call is problematic because you don't want to
> > evaluate all of the arguments.  So, you'd have to call it with
> > closures or some sort of lazy construct.
>
> Yes.

And that requires syntax for the arguments that can be avoided by a
macro definition of ?:.

> There are certainly trade-offs involved though. Introducing syntax is
> forking the language.  Your syntax might clash with another syntax, making
> it difficult for people to use both and so on.

Every name definition forks the language and introduces the
possibility of a clash.

> I am not sure it is a good idea to always reach for a macro.

So what?  No one said "always reach for a macro".  We've only said
that macros can provide benefits in certain situations.  No one is
saying that anyone has to use them.

> I face exactly this dilemma with OCaml because it lacks try..finally. For
> the time being, I just use a try_finally function and wrap things in
> closures. With currying, there is typically zero overhead.

Zero syntax overhead or zero runtime overhead?
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bncj5mn9jh6d4@corp.supernews.com>
Andy Freeman wrote:
> The HOFs in this case are entirely determined by the use-site.  The
> pattern in which they are used can be abstracted, that's what the
> macro does, but the HOFs can't be.

Right, but what patterns can be factored out into a macro but not factored
out into a HOF? Can you give a practical example?

> And that requires syntax for the arguments that can be avoided by a
> macro definition of ?:.

Yes: (fun () -> ...)

>> There are certainly trade-offs involved though. Introducing syntax is
>> forking the language.  Your syntax might clash with another syntax,
>> making it difficult for people to use both and so on.
> 
> Every name definition forks the language and introduces the
> possibility of a clash.

You mean defining identifiers? I think adding syntax is more severe than
adding definitions but I cannot think of a logical justification...

>> I face exactly this dilemma with OCaml because it lacks try..finally. For
>> the time being, I just use a try_finally function and wrap things in
>> closures. With currying, there is typically zero overhead.
> 
> Zero syntax overhead or zero runtime overhead?

Probably both.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186769442.930787.71340@i13g2000prf.googlegroups.com>
On Aug 9, 5:24 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> Andy Freeman wrote:
> > The HOFs in this case are entirely determined by the use-site.  The
> > pattern in which they are used can be abstracted, that's what the
> > macro does, but the HOFs can't be.
>
> Right, but what patterns can be factored out into a macro but not factored
> out into a HOF? Can you give a practical example?

I mis-typed.  The HOF for the pattern is defined once, but the code to
make it useable is required everywhere the HOF is used.  That code
includes defining closures or other mechanisms to allow the HOF to
control evaluation.

And then there's the whole "make definition" thing.

The gyrations to use the try-finally HOF are an example that Harrop
himself has mentioned.

> >> There are certainly trade-offs involved though. Introducing syntax is
> >> forking the language.  Your syntax might clash with another syntax,
> >> making it difficult for people to use both and so on.
>
> > Every name definition forks the language and introduces the
> > possibility of a clash.
>
> You mean defining identifiers? I think adding syntax is more severe than
> adding definitions but I cannot think of a logical justification...

My point is that defining any identifier introduces a possible clash
with an identifier that can be used in the same context.  Defining a
new function introduces a possible clash with a previous function.  In
a lisp-1, defining a new function introduces a possible clash with a
previous function or variable.  (Lisp-2s don't have that problem, but
function names outside the function position have to be treated
specially.)  In lisp, a new macro can clash with a function or another
macro, but since a new function can clash with an old function, I
don't see that as a big deal.

> >> I face exactly this dilemma with OCaml because it lacks try..finally. For
> >> the time being, I just use a try_finally function and wrap things in
> >> closures. With currying, there is typically zero overhead.
>
> > Zero syntax overhead or zero runtime overhead?
>
> Probably both.

Not if there's syntax to delay execution of the body until it's under
the control of the try-finally HOF.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9ikvi$n5b$1@online.de>
Andy Freeman schrieb:
> On Aug 9, 5:24 pm, Jon Harrop <····@ffconsultancy.com> wrote:
>> Andy Freeman wrote:
>>> The HOFs in this case are entirely determined by the use-site.  The
>>> pattern in which they are used can be abstracted, that's what the
>>> macro does, but the HOFs can't be.
>> Right, but what patterns can be factored out into a macro but not factored
>> out into a HOF? Can you give a practical example?
> 
> I mis-typed.  The HOF for the pattern is defined once, but the code to
> make it useable is required everywhere the HOF is used.  That code
> includes defining closures or other mechanisms to allow the HOF to
> control evaluation.

That "control evaluation" thing is simply unnecessary in a 
side-effect-free ("pure") language.

The only reason to use a lambda in Haskell (or any other currying 
language) would be those rare cases where parameter order prevents you 
from using currying.

> And then there's the whole "make definition" thing.
> 
> The gyrations to use the try-finally HOF are an example that Harrop
> himself has mentioned.

try-finally is not something that you usually need in a pure language.
Nor do you need loops - what you write as a loop in a side-effectful 
("impure") language is simply a fold in a pure language. Think mapcar 
and friends, systematically applied to everything, and the compiler 
conspiring with the standard libraries to make the whole thing efficient.

>>>> There are certainly trade-offs involved though. Introducing syntax is
>>>> forking the language.  Your syntax might clash with another syntax,
>>>> making it difficult for people to use both and so on.
>>> Every name definition forks the language and introduces the
>>> possibility of a clash.
>> You mean defining identifiers? I think adding syntax is more severe than
>> adding definitions but I cannot think of a logical justification...
> 
> My point is that defining any identifier introduces a possible clash
> with an identifier that can be used in the same context.  Defining a
> new function introduces a possible clash with a previous function. 

Yes, you need namespaces of some kind.
I think something like Java's approach would be necessary: define a 
global namespace where everybody has a good place to put his 
definitions in. The details make that approach quite clumsy though - I 
think one could do better.

Regards,
Jo
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186784819.134885.247440@i13g2000prf.googlegroups.com>
On Aug 10, 2:23 pm, Joachim Durchholz <····@durchholz.org> wrote:
> Andy Freeman schrieb:
> > I mis-typed.  The HOF for the pattern is defined once, but the code to
> > make it useable is required everywhere the HOF is used.  That code
> > includes defining closures or other mechanisms to allow the HOF to
> > control evaluation.
>
> That "control evaluation" thing is simply unnecessary in a
> side-effect-free ("pure") language.

There are things that aren't ever evaluated, such as the name for a
binding.

BTW - pure isn't enough to ignore evaluation order, effectively lazy
is required as well.  (BTW - does it make sense to have a lazy
language that isn't pure?)

> The only reason to use a lambda in Haskell (or any other currying
> language) would be those rare cases where parameter order prevents you
> from using currying.

Are all currying languages pure and lazy?

> > The gyrations to use the try-finally HOF are an example that Harrop
> > himself has mentioned.
>
> try-finally is not something that you usually need in a pure language.

try-finally isn't something that you usually need in any language.
However it's fairly useful if you've got any sort of non-local exit
because guaranteeing post-conditions is often important.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <t78x8iq5q4.fsf@hod.lan.m-e-leypold.de>
Andy Freeman wrote:

>> The only reason to use a lambda in Haskell (or any other currying
>> language) would be those rare cases where parameter order prevents you
>> from using currying.
>
> Are all currying languages pure and lazy?

No. See OCaml.

- M
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bqo4he4rdne37@corp.supernews.com>
Andy Freeman wrote:
> BTW - pure isn't enough to ignore evaluation order,

Why not?

> (BTW - does it make sense to have a lazy language that isn't pure?)

There was a lazy ML.

>> The only reason to use a lambda in Haskell (or any other currying
>> language) would be those rare cases where parameter order prevents you
>> from using currying.
> 
> Are all currying languages pure and lazy?

OCaml curries and is impure and strict.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <9sodhefgbi.fsf@hod.lan.m-e-leypold.de>
Jon Harrop wrote:

> Andy Freeman wrote:
>> BTW - pure isn't enough to ignore evaluation order,
>
> Why not?
>
>> (BTW - does it make sense to have a lazy language that isn't pure?)
>
> There was a lazy ML.
>
>>> The only reason to use a lambda in Haskell (or any other currying
>>> language) would be those rare cases where parameter order prevents you
>>> from using currying.
>> 
>> Are all currying languages pure and lazy?
>
> OCaml curries and is impure and strict.

Strictly speaking one should distinguish between lazy vs. eager
evaluation (an implementation aspect) and strict vs. non-strict (a
semantical aspect). Those are closely linked together, of course, but
it makes discussion easier to do so.

Regards -- Markus
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9k9g8$7ej$1@online.de>
Andy Freeman schrieb:
> BTW - pure isn't enough to ignore evaluation order, effectively lazy
> is required as well.

Please elaborate.
In my book, pureness *is* enough to ignore evaluation order.
(Assuming nontermination is considered a bug, not an effect.)

 >  (BTW - does it make sense to have a lazy
> language that isn't pure?)

I have been told that such languages do exist, but that they need some 
additional control over when those calls that have side effect are 
really run.
It didn't sound very convincing to me, but then I haven't looked up the 
details.

>> The only reason to use a lambda in Haskell (or any other currying
>> language) would be those rare cases where parameter order prevents you
>> from using currying.
> 
> Are all currying languages pure and lazy?

Currying and laziness are technically orthogonal.
I do suspect that currying and laziness are more helpful in combination 
than when used in isolation, but that's just a hunch.

> try-finally isn't something that you usually need in any language.
> However it's fairly useful if you've got any sort of non-local exit
> because guaranteeing post-conditions is often important.

In a pure language, you don't need non-local exits.

There is one exception: if a computation is aborted due to some failure 
(uncaught exception, division by zero, out of memory, whatever), you do 
have a non-local exit - but you also don't guarantee any postcondition 
anyway.

Resource deallocation is one of the more important postconditions, but 
you don't deallocate resources in a pure language, so this isn't a 
factor, either.

(Of course, resources are deallocated *somewhere*, even when programming 
in a pure language. The pure language generates a list of actions and 
submits it to the run-time system for execution; you do need some way to 
ensure that the resources are deallocated somewhere. However, such 
action lists are simple data structures, and it's a matter of setting up 
the proper mechanisms in the library to get that done no matter what - 
but that's a question of proper design for the action list data structure.)

Regards,
Jo
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187020667.537153.174400@z24g2000prh.googlegroups.com>
On Aug 11, 5:19 am, Joachim Durchholz <····@durchholz.org> wrote:
> Andy Freeman schrieb:
> > BTW - pure isn't enough to ignore evaluation order, effectively lazy
> > is required as well.
>
> Please elaborate.
> In my book, pureness *is* enough to ignore evaluation order.
> (Assuming nontermination is considered a bug, not an effect.)

The problem isn't whether an evaluation is delayed, it is whether it
happens at all.
> > try-finally isn't something that you usually need in any language.
> > However it's fairly useful if you've got any sort of non-local exit
> > because guaranteeing post-conditions is often important.
>
> In a pure language, you don't need non-local exits.

"need" isn't the question - "useful" is.

> There is one exception: if a computation is aborted due to some failure
> (uncaught exception, division by zero, out of memory, whatever), you do
> have a non-local exit - but you also don't guarantee any postcondition
> anyway.

Huh?  Why wouldn't I want guaranteed post-conditions in those
circumstances?

> Resource deallocation is one of the more important postconditions, but
> you don't deallocate resources in a pure language, so this isn't a
> factor, either.

Many programs manipulate external resources.  It really matters
whether you turn off a motor at the right time.  It really matters
whether you finish one activity before starting another, even if
there's no data dependence.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c1f7bcul7vra1@corp.supernews.com>
Andy Freeman wrote:
> Many programs manipulate external resources.  It really matters
> whether you turn off a motor at the right time.  It really matters
> whether you finish one activity before starting another, even if
> there's no data dependence.

Yes. You solve that by funnelling your computation through a monad in
Haskell.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9rmlt$7rn$1@online.de>
Andy Freeman schrieb:
> On Aug 11, 5:19 am, Joachim Durchholz <····@durchholz.org> wrote:
>> Andy Freeman schrieb:
>>> BTW - pure isn't enough to ignore evaluation order, effectively lazy
>>> is required as well.
>> Please elaborate.
>> In my book, pureness *is* enough to ignore evaluation order.
>> (Assuming nontermination is considered a bug, not an effect.)
> 
> The problem isn't whether an evaluation is delayed, it is whether it
> happens at all.

In a pure language, there is no observable difference regardless of 
whether an unneeded subexpression is evaluated or not (modulo 
nontermination).
For a needed subexpression, they will always be evaluated.

So, I still fail to see a difference - can you elaborate?

>>> try-finally isn't something that you usually need in any language.
>>> However it's fairly useful if you've got any sort of non-local exit
>>> because guaranteeing post-conditions is often important.
>> In a pure language, you don't need non-local exits.
> 
> "need" isn't the question - "useful" is.

They aren't useful either.

You simply don't allocate a resource inside code. It's the set of IO 
actions (a data structure) that allocate and free.

>> There is one exception: if a computation is aborted due to some failure
>> (uncaught exception, division by zero, out of memory, whatever), you do
>> have a non-local exit - but you also don't guarantee any postcondition
>> anyway.
> 
> Huh?  Why wouldn't I want guaranteed post-conditions in those
> circumstances?

A failed computation can't do anything useful. At best it can report 
that it failed.

>> Resource deallocation is one of the more important postconditions, but
>> you don't deallocate resources in a pure language, so this isn't a
>> factor, either.
> 
> Many programs manipulate external resources.  It really matters
> whether you turn off a motor at the right time.  It really matters
> whether you finish one activity before starting another, even if
> there's no data dependence.

In a pure language, you generate a data structure that contains IO 
actions, you don't change the world directly.
This changes the game entirely. You don't ensure the postconditions in a 
finally clause, you generate the data structure so that the final 
deallocation action is always present.

Regards,
Jo
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187195344.428380.195820@x40g2000prg.googlegroups.com>
On Aug 14, 12:47 am, Joachim Durchholz <····@durchholz.org> wrote:
> Andy Freeman schrieb:
> > The problem isn't whether an evaluation is delayed, it is whether it
> > happens at all.
>
> In a pure language, there is no observable difference regardless of
> whether an unneeded subexpression is evaluated or not (modulo
> nontermination).

Termination, or lack thereof, is pretty important.

Heck, resource use by unneeded expressions that do terminate can be
important.


> > "need" isn't the question - "useful" is.
>
> They aren't useful either.
>
> You simply don't allocate a resource inside code.  It's the set of IO
> actions (a data structure) that allocate and free.

IO actions are code.

> > Huh?  Why wouldn't I want guaranteed post-conditions in those
> > circumstances?
>
> A failed computation can't do anything useful.

Huh?  It doesn't matter whether it did anything useful, one still
needs to operate correctly afterwards, whether or not it failed.

> In a pure language, you generate a data structure that contains IO
> actions, you don't change the world directly.
> This changes the game entirely. You don't ensure the postconditions in a
> finally clause, you generate the data structure so that the final
> deallocation action is always present.

Unless deallocation can be programmatically controlled, that's not good
enough.

The great strength of a pure language is that its implementations have
great freedom to reorder computation.  That's a problem in some
applications.

Yes, one can introduce artificial data dependencies to generate some
order, but that is overhead.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <jmps1on3hv.fsf@hod.lan.m-e-leypold.de>
Andy Freeman wrote:

> On Aug 14, 12:47 am, Joachim Durchholz <····@durchholz.org> wrote:
>> Andy Freeman schrieb:
>> > The problem isn't whether an evaluation is delayed, it is whether it
>> > happens at all.
>>
>> In a pure language, there is no observable difference regardless of
>> whether an unneeded subexpression is evaluated or not (modulo
>> nontermination).
>
> Termination, or lack thereof, is pretty important.
>
> Heck, resource use by unneeded expressions that do terminate can be
> important.
>
>
>> > "need" isn't the question - "useful" is.
>>
>> They aren't useful either.
>>
>> You simply don't allocate a resource inside code.  It's the set of IO
>> actions (a data structure) that allocate and free.
>
> IO actions are code.
>
>> > Huh?  Why wouldn't I want guaranteed post-conditions in those
>> > circumstances?
>>
>> A failed computation can't do anything useful.
>
> Huh?  It doesn't matter whether it did anything useful, one still
> needs to operate correctly afterwards, whether or not it failed.
>
>> In a pure language, you generate a data structure that contains IO
>> actions, you don't change the world directly.
>> This changes the game entirely. You don't ensure the postconditions in a
>> finally clause, you generate the data structure so that the final
>> deallocation action is always present.
>
> Unless deallocation can be programmatically controlled, that's not good
> enough.
>
> The great strength of a pure language is that its implementations have
> great freedom to reorder computation.  That's a problem in some
> applications.

And the beauty is that this freedom doesn't matter. If the program
parts in question interact with the outside world, the freedom is not
there (since threading the world state through all operations
guarantees a definite order). If it doesn't interact, the freedom is
there, but differences in evaluation order are not observable (since
there are no side effects you can use to track evaluation order).

> Yes, one can introduce artificial data dependencies to generate some
> order, but that is overhead.


Regards -- Markus
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9vcnk$bpf$1@online.de>
Andy Freeman schrieb:
> On Aug 14, 12:47 am, Joachim Durchholz <····@durchholz.org> wrote:
>> Andy Freeman schrieb:
>>> The problem isn't whether an evaluation is delayed, it is whether it
>>> happens at all.
>> In a pure language, there is no observable difference regardless of
>> whether an unneeded subexpression is evaluated or not (modulo
>> nontermination).
> 
> Termination, or lack thereof, is pretty important.
> 
> Heck, resource use by unneeded expressions that do terminate can be
> important.

Yes, but you don't need macros to optimize.

>>> "need" isn't the question - "useful" is.
>> They aren't useful either.
>>
>> You simply don't allocate a resource inside code.  It's the set of IO
>> actions (a data structure) that allocate and free.
> 
> IO actions are code.

This might not make a difference in Lisp, but it does make a difference 
in most other languages.
And, no, IO actions are not code. Not in your typical pure language (it 
would be impure if IO actions were allowed to be code).

>>> Huh?  Why wouldn't I want guaranteed post-conditions in those
>>> circumstances?
 >>
>> A failed computation can't do anything useful.
> 
> Huh?  It doesn't matter whether it did anything useful, one still
> needs to operate correctly afterwards, whether or not it failed.

Sure. The failed computation still can't do anything useful, it's the 
code that calls it that could decide to try another algorithm or whatever.

>> In a pure language, you generate a data structure that contains IO
>> actions, you don't change the world directly.
>> This changes the game entirely. You don't ensure the postconditions in a
>> finally clause, you generate the data structure so that the final
>> deallocation action is always present.
> 
> Unless deallocation can be programmatically controlled, that's not good
> enough.

We're talking actions here. Deallocation is an action, and you don't 
need to rearrange computations.

> The great strength of a pure language is that its implementations have
> great freedom to reorder computation.  That's a problem in some
> applications.
> 
> Yes, one can introduce artificial data dependencies to generate some
> order, but that is overhead.

And causes unwanted ordering overspecifications in many cases.

Regards,
Jo
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187275476.335219.10500@x35g2000prf.googlegroups.com>
On Aug 15, 10:22 am, Joachim Durchholz <····@durchholz.org> wrote:
> Andy Freeman schrieb:
> > Termination, or lack thereof, is pretty important.
>
> > Heck, resource use by unneeded expressions that do terminate can be
> > important.
>
> Yes, but you don't need macros to optimize.

I never said that macros were required for anything.  We're discussing
whether good control over evaluation (whether automatic or programmed)
is important.

> >>> "need" isn't the question - "useful" is.
> >> They aren't useful either.
>
> >> You simply don't allocate a resource inside code.  It's the set of IO
> >> actions (a data structure) that allocate and free.
>
> > IO actions are code.
>
> This might not make a difference in Lisp, but it does make a difference
> in most other languages.
> And, no, IO actions are not code.

write('a') sure looks like code.

> Not in your typical pure language (it
> would be impure if IO actions were allowed to be code).

"code" - things expressed in a programming language that have some
effect on program behavior.

If a program in a pure programming language can do IO, IO operations
are code in said language.

> >> A failed computation can't do anything useful.
>
> > Huh?  It doesn't matter whether it did anything useful, one still
> > needs to operate correctly afterwards, whether or not it failed.
>
> Sure. The failed computation still can't do anything useful, it's the
> code that calls it that could decide to try another algorithm or whatever.

The failed computation didn't do everything one might have liked this
time, but it might have done something useful and it might be
successful in some other circumstance (yes, failure can be dynamic).

However, my point still stands, if you want to do something after a
failed computation, it's useful to be able to establish some post-
conditions.

> > Unless deallocation can be programmatically controlled, that's not good
> > enough.
>
> We're talking actions here. Deallocation is an action, and you don't
> need to rearrange computations.

Sure you do.  Suppose two computations each allocate 3GB and are run on
a system that can't handle more than 4GB concurrently.  If they're run
concurrently, they fail.  If they run sequentially, they succeed.  I'm
pretty sure that that difference is important, even if its
implications aren't convenient for certain programming methodologies.

Purity is a tool/mechanism, not a goal.
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187278939.327426.159290@g12g2000prg.googlegroups.com>
On Aug 16, 7:44 am, Andy Freeman <······@earthlink.net> wrote:
> On Aug 15, 10:22 am, Joachim Durchholz <····@durchholz.org> wrote:
> > We're talking actions here. Deallocation is an action, and you don't
> > need to rearrange computations.
>
> Sure you do.  Suppose two computations each allocate 3GB and are run on
> a system that can't handle more than 4GB concurrently.  If they're run
> concurrently, they fail.  If they run sequentially, they succeed.  I'm
> pretty sure that that difference is important, even if its
> implications aren't convenient for certain programming methodologies.

Note that inability to run isn't the only inconvenient consequence of
doing "independent" things in the wrong order.  Caching has a huge
effect on the performance of many applications and controlling
evaluation order is an important tool in making caching work.
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9j4me$3gj$1@registered.motzarella.org>
Joachim Durchholz schrieb:

> The only reason to use a lambda in Haskell (or any other currying 
> language) would be those rare cases where parameter order prevents you 
> from using currying.

Aah good, now I can learn something:
how do you curry something like   x * x

(mapcar (lambda (x) (* x x)) list)

Here the parameter order does not matter. But parameter order is
the only reason to use a lambda. From that follows we can curry
this beast away. How?


André
-- 
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xsl6qers8.fsf@ruckus.brouhaha.com>
André Thieme <······························@justmail.de> writes:
> Aah good, now I can learn something:
> how do you curry something like   x * x
> 
> (mapcar (lambda (x) (* x x)) list)
> 
> Here the parameter order does not matter. But parameter order is
> the only reason to use a lambda. From that follows we can curry
> this beast away. How?

I think I don't understand this question.  You can certainly say

  mapx2 = map (\x->x*x)

which is the equivalent of something like

  (define (mapx2 list) (map (lambda (x) (* x x)) list))

is that what you mean?
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f9k867$59e$1@online.de>
André Thieme schrieb:
> Joachim Durchholz schrieb:
> 
>> The only reason to use a lambda in Haskell (or any other currying 
>> language) would be those rare cases where parameter order prevents you 
>> from using currying.
> 
> Aah good, now I can learn something:
> how do you curry something like   x * x
> 
> (mapcar (lambda (x) (* x x)) list)

No, that would be the other case where currying doesn't work.
I didn't consider the case where a parameter is used in more than one place.

In Haskell, this would be

   mapcar (\x->x*x) list


Regards,
Jo
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9h7fa$g11$1@online.de>
Andy Freeman schrieb:
> On Aug 8, 2:38 am, Joachim Durchholz <····@durchholz.org> wrote:
>> Andy Freeman schrieb:
>>> "Wasn't worth the effort" says that the cost was more than the
>>> benefit.
>> Sorry, I meant to say "wasn't worth thinking about".
> 
> Of course it wasn't worth thinking about - you couldn't do anything
> about it even if you did think about it.

I do recognize abstraction possibilities even if they cannot be 
exploited in the language being used.

There are cases where abstraction would in theory be possible, but in 
practice there are so many little differences that the abstraction would 
be as complicated as the unabstracted ideas.
Or where the abstraction gains you half a token on the average, while 
restricting the design (abstracting means taking away details, and you 
can't vary what's been taken away).

I think you're chasing the wrong trail here, trying to make me admit 
that I've been missing macros, while I'm talking about opportunities to 
abstract that should *not* be taken for architectural reasons, 
regardless of whether abstraction would be done through functions or macros.

> Without the macro, every time you want to use the pattern, you have to
> correctly define the relevant HOFs and use the relevant control
> structures.

Please describe a situation that a single macro would deal with, but 
where you'd need a different HOF for each case.
I don't expect such a situation to exist, but I'd be interested to hear 
about a counterexample.

Regards,
Jo
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186770400.689401.168340@x35g2000prf.googlegroups.com>
On Aug 10, 1:26 am, Joachim Durchholz <····@durchholz.org> wrote:
> I think you're chasing the wrong trail here, trying to make me admit
> that I've been missing macros,

I mostly don't care whether someone else uses macros.  I care whether
macros are available for me to use.

>                      while I'm talking about opportunities to
> abstract that should *not* be taken for architectural reasons,
> regardless of whether abstraction would be done through functions or macros.

I don't think that those architectural reasons hold across all domains
and applications.

People use domain-specific languages.  Maybe they're wrong to do so,
but if they are, that error isn't constrained to programming
languages.  Heck, even within programming languages, we seem to
believe that domain and application specific functions are a good
thing.  Why is syntax necessarily different?

> Please describe a situation that a single macro would deal with, but
> where you'd need a different HOF for each case.

Actually, I mis-typed.  It isn't the pattern's HOF that has to be
defined multiple times.  It's the code to make the HOF usable.

I provided a link to python's with-statement which demonstrates this
(http://docs.python.org/ref/with.html).  One can write a HO "with
function", but it would have to be called with something that delays
the evaluation of the "block" until the relevant context is
established.  Expressing that delay will require syntax for each and
every use.

As a result, no one wrote said with function.  Instead, they wrote the
relevant try-finally code each time.

Clearly the with statement is unnecessary, as are many statements,
because the relevant functionality can be expressed in other ways, but
syntax matters.

Then there's the whole bit of establishing bindings.  Yes, you can use
the existing mechanisms, but those mechanisms are largely orthogonal
to other things.  In some cases, you really do want to tie creating
bindings to other constructions because that is more concise and/or
more likely to be correct.  (There's a form of the with statement that
does this.)
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9ilvf$oo1$1@online.de>
Andy Freeman schrieb:
> On Aug 10, 1:26 am, Joachim Durchholz <····@durchholz.org> wrote:
>> I think you're chasing the wrong trail here, trying to make me admit
>> that I've been missing macros,
> 
> I mostly don't care whether someone else uses macros.  I care whether
> macros are available for me to use.

OK.

>>                      while I'm talking about opportunities to
>> abstract that should *not* be taken for architectural reasons,
>> regardless of whether abstraction would be done through functions or macros.
> 
> I don't think that those architectural reasons hold across all domains
> and applications.
> 
> People use domain-specific languages.  Maybe they're wrong to do so,
> but if they are, that error isn't constrained to programming
> languages.  Heck, even within programming languages, we seem to
> > believe that domain and application specific functions are a good
> thing.  Why is syntax necessarily different?

Because it's not syntax that's the difference. You don't need macros if 
you don't need to control when an expression is evaluated.

Syntactically, both macros and function calls are just S-expressions in 
Lisp. It's the semantics that differs, the time at which the expressions 
are evaluated.
You don't need to make that difference if the semantics of an expression 
is independent of when it is evaluated.

> I provided a link to python's with-statement which demonstrates this
> (http://docs.python.org/ref/with.html).  One can write a HO "with
> function", but it would have to be called with something that delays
> the evaluation of the "block" until the relevant context is
> established.  Expressing that delay will require syntax for each and
> every use.
> 
> As a result, no one wrote said with function.  Instead, they wrote the
> relevant try-finally code each time.

This seems to be a problem of languages with side effects.

In Haskell, exceptions are handled using a set of higher-order 
functions. try-finally is approximated, to the extent that is needed in 
a pure language, via the Maybe type.

Like you, I was sceptical when first reading about Haskell. However, I 
found that most of the things that you need - loops, exceptions, 
polymorphic lists, etc. - are either unnecessary, or easily replaced by 
a different idiom, or have a direct implementation.

> Then there's the whole bit of establishing bindings.  Yes, you can use
> the existing mechanisms, but those mechanisms are largely orthoganal
> to other things.  In some cases, you really do want to tie creating
> bindings to other constructions because that is more concise and/or
> more likely to be correct.  (There's a form of the with statement that
> does this.)

What is this binding issue, in a nutshell?

Regards,
Jo
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186786237.211959.210870@i38g2000prf.googlegroups.com>
On Aug 10, 2:40 pm, Joachim Durchholz <····@durchholz.org> wrote:
> Andy Freeman schrieb:
> > People use domain-specific languages.  Maybe they're wrong to do so,
> > but if they are, that error isn't constrained to programming
> > languages.  Heck, even within programming languages, we seem to
> > believe that domain and application specific functions are a good
> > thing.  Why is syntax necessarily different?
>
> Because it's not syntax that's the difference. You don't need macros if
> you don't need to control when an expression is evaluated.

Macros don't just control evaluation.

Macros "just" write code.  That code may evaluate things that appeared
as arguments to the macro, but it may also include code that wasn't in
the macro call.  That code may define names that were macro arguments.

Macros let a programmer write shorter and clearer code than was
possible otherwise.

Applications have patterns that appear multiple times.  Macros let the
programmer name those patterns and use them by name instead of
expressing them directly.  The "use by name" need only include that
which is specific to a given use - the macro's definition provides
everything that was common across different instances.

Note that functions do exactly the same thing for a subset of the
patterns that macros can handle.

Why is naming and exploiting a syntax pattern so objectionable?

> > As a result, no one wrote said with function.  Instead, they wrote the
> > relevant try-finally code each time.
> ...
> In Haskell, exceptions are handled using a set of higher-order

try-finally is for post-conditions - exceptions are merely one kind of
exit.

> > Then there's the whole bit of establishing bindings.  Yes, you can use
> > the existing mechanisms, but those mechanisms are largely orthogonal
> > to other things.  In some cases, you really do want to tie creating
> > bindings to other constructions because that is more concise and/or
> > more likely to be correct.  (There's a form of the with statement that
> > does this.)
>
> What is this binding issue, in a nutshell?

Code often uses names for values.  Entities that generate code,
whether human, macro, or other, generate forms that specify said names
and values and other forms that use said names as references to said
values.

An instance of a code pattern may contain bindings.  If a macro is to
generate code for such instances, it has to be able to generate
binding forms, just as the programmer who would have otherwise
expressed said instance directly would have.

Note that some names can be generated while others must be specified.
It all depends on the pattern.
From: Rob Warnock
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <zf2dnSQwYoL0GCDbnZ2dnUVZ_uOmnZ2d@speakeasy.net>
Andy Freeman  <······@earthlink.net> wrote:
+---------------
| Macros don't just control evaluation.
| 
| Macros "just" write code.  That code may evaluate things that appeared
| as arguments to the macro, but it may also include code that wasn't in
| the macro call.  That code may define names that were macro arguments.
| 
| Macros let a programmer write shorter and clearer than was possible
| otherwise.
+---------------

In particular, macros give you access to the compile-time environment,
so you can do certain kinds of bookkeeping *once* at compile time and
have the results compiled into your program. That is, not only can you
write in a more declarative style, you can have said declarations link
themselves into your chosen application data structures at compile time!

I won't repeat the whole story again here, since I've told it several
times before [search for "FOCAL macros rpw3 Warnock" (without the quotes)
to find a few], but I once used macros in MACRO-10 assembler (for the
DEC PDP-10) to do some very complicated pre-processing for the lexer
for an implementation of the FOCAL interactive programming language.
Various macro calls that declared pieces of the language syntax were
sprinkled throughout the code, and these macro calls cooperated with
one another to build a *single* final highly-compacted data structure
which was dropped into the source of the program as initialization
statements at the very end. This data structure was then used at runtime
to provide an extremely efficient lexer/parser for FOCAL commands and
arithmetic expressions.

The same task would be trivial to do today with CL macros; it was
possible in 1970 with MACRO-10 only because the macros in the latter
were Turing-complete at compile time. As I've said before:

    Macros in MACRO-10 could do compile-time looping and branching,
    could tear apart lists of arguments and individual arguments
    (considered as lists of characters), could generate new symbols
    based on arguments provided, could define/set/modify compile-time
    variables (including those whose names were generated by macros),
    and since MACRO-10 was a two-pass assembler that provided the ".if1"
    and ".if2" tests, macros could gather information during pass one
    (storing it in compile-time variables/symbols) and during pass two
    emit local code chosen based on the *global* contents of the program! 

Sound familiar?!?  ;-}  ;-}

Or for a much simpler, more "Lispy" example, consider the following
macro used in the CMUCL compiler (file "unix.lisp") for generating
enums & bitmasks:

    > (defmacro def-enum (inc cur &rest names)
	(flet ((defform (name)
		 (prog1 (when name `(defconstant ,name ,cur))
		   (setf cur (funcall inc cur 1)))))
	  `(progn ,@(mapcar #'defform names))))

    DEF-ENUM
    > (macroexpand '(def-enum + 3 foo bar baz))   ; simple C-style enum

    (PROGN
      (DEFCONSTANT FOO 3)
      (DEFCONSTANT BAR 4)
      (DEFCONSTANT BAZ 5))
    T
    > (macroexpand '(def-enum ash 1 foo bar baz)) ; "enum" of bitmasks

    (PROGN
      (DEFCONSTANT FOO 1)
      (DEFCONSTANT BAR 2)
      (DEFCONSTANT BAZ 4))
    T
    > (defun rsh8 (x &optional y)
	(declare (ignore y))
	(ash x -8))
    > (macroexpand '(def-enum rsh8 #xff000000
			      byte3-mask byte2-mask byte1-mask byte0-mask))

    (PROGN
      (DEFCONSTANT BYTE3-MASK 4278190080)
      (DEFCONSTANT BYTE2-MASK 16711680)
      (DEFCONSTANT BYTE1-MASK 65280)
      (DEFCONSTANT BYTE0-MASK 255))
    T
    > 


-Rob

-----
Rob Warnock			<····@rpw3.org>
627 26th Avenue			<URL:http://rpw3.org/>
San Mateo, CA 94403		(650)572-2607
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-D373C5.23581910082007@news-europe.giganews.com>
In article <············@online.de>,
 Joachim Durchholz <··@durchholz.org> wrote:

>Because it's not syntax that's the difference. You don't need macros if 
> you don't need to control when an expression is evaluated.
> 
> Syntactically, both macros and function calls are just S-expressions in 
> Lisp. It's the semantics that differs, the time at which the expressions 
> are evaluated.
> You don't need to make that difference if the semantics of an expression 
> is independent of when it is evaluated.

Say you have an expression

(foo :bar)

If foo is a function, at runtime the function foo will be
called with :bar as an argument.

If foo is a macro, then at macro expansion time,
the macrofunction foo will be called with the source and needs to
return new source. The new source will be compiled
and the compiled new source runs at runtime.

So, macros are not about different evaluation time. Macros
are source transforming functions. They can generate
arbitrary source. The result of the source transformation
(new source) runs at runtime, just as the function would.
So the macro expansion is an additional step.

A macro allows you to write a function that takes source
as input and that generates new source as output.
This allows you to write compact source forms,
that can be manipulated by macro functions into much
larger/complicated expressions. Expressions that
you don't want to write by hand.

-- 
http://lispm.dyndns.org
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9io84$s2f$1@online.de>
Rainer Joswig schrieb:
> In article <············@online.de>,
>  Joachim Durchholz <··@durchholz.org> wrote:
> 
>> Because it's not syntax that's the difference. You don't need macros if 
>> you don't need to control when an expression is evaluated.
>>
>> Syntactically, both macros and function calls are just S-expressions in 
>> Lisp. It's the semantics that differs, the time at which the expressions 
>> are evaluated.
>> You don't need to make that difference if the semantics of an expression 
>> is independent of when it is evaluated.
> 
> So, macros are not about different evaluation time. Macros
> are source transforming functions. They can generate
> arbitrary source. The result of the source transformation
> (new source) runs at runtime, just as the function would.
> So the macro expansion is an additional step.

Yes, I know that, but what does it buy me?

Let me try a concrete example, straight from Practical Common Lisp by 
Peter Seibel that somebody was kind enough to direct my attention to.

WHEN, UNLESS, DOLIST and DOTIMES could all be easily written as 
higher-order functions. They'd take functions as parameters for the 
various conditions and bodies, and return a function that, when run, 
executes the constructed conditional resp. loop.

To make that smooth, you'd need as little syntactic overhead as 
possible. Haskell's syntax is minimal enough for that IMHO.

I'm not sure how much of that argument transfers to more complicated macros.

Regards,
Jo
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186786575.388001.22590@d30g2000prg.googlegroups.com>
> Yes, I know that, but what does it buy me?

It's limited entirely by your imagination.

Very simplistic example. Say you want something in your application's
About Box like "version x.y.z built on HH:MM MM/DD/YY". You could have
some script in the build process that captured the info and put it
into a file the about box could read, or you could do:

(defmacro define-compile-time-getter (name)
  (multiple-value-bind (sec min hr day mon yr) (get-decoded-time)
    `(defun ,name () ,(format nil "~A:~A ~A/~A/~A" hr min mon day
yr))))

(define-compile-time-getter get-compile-time-string)

(get-compile-time-string) -> "18:35 8/10/2007"

... two days later...

(get-compile-time-string) -> "18:35 8/10/2007"

You can extend this basic technique arbitrarily far. Consider tools
which take an XML description of a GUI and generate code for a
toolkit. You can implement it as functions in a library, instead of
dealing with the hassle of writing an external tool and integrating it
into your build system. It can also be used to encapsulate patterns in
code.

Eg: in my compiler, I've got "information classes" that encapsulate
all the various pieces of data maintained by a particular analysis. So
you might do:

(defclass liveness-info (information-block)
  ((livein :accessor liveness-livein :initform livein)
   (liveout :accessor liveness-liveout :initform liveout)))

Logically, these info objects are associated with a basic block or
temporary, but for reasons of memory use and maintainability, these
fields are not all defined in the temporary class, but rather accessed
via some indirect fields. For purposes of convenience, however, you
want to be able to quickly traverse the indirections and get at the
info. So you want some accessors:

(bb-livein basic-block) -> (liveness-livein (liveness-info (node-info
basic-block)))
(bb-liveout basic-block) -> (liveness-liveout (liveness-info (node-
info basic-block)))

Manually defining these accessors is painful and error-prone. So I
have a macro that takes a sexpr description of an info block and
generates both class and all the necessary accessors. The other
constraint is that this has to be fast. In the macro-based approach,
these accessors will in theory compile into quite efficient code. I
doubt an HOF-based mechanism would be as efficient without some heroic
optimization.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9kahp$98m$1@online.de>
Rayiner Hashem schrieb:
>> Yes, I know that, but what does it buy me?
> 
> It's limited entirely by your imagination.
> 
> Very simplistic example. Say you want something in your application's
> About Box like "version x.y.z built on HH:MM MM/DD/YY". You could have
> some script in the build process that captured the info and put it
> into a file the about box could read,

Right. All revision control systems that I know about do that.

 > or you could do:
> 
> (defmacro define-compile-time-getter (name)
>   (multiple-value-bind (sec min hr day mon yr) (get-decoded-time)
>     `(defun ,name () ,(format nil "~A:~A ~A/~A/~A" hr min mon day
> yr))))
> 
> (define-compile-time-getter get-compile-time-string)
> 
> (get-compile-time-string) -> "18:35 8/10/2007"
> 
> ... two days later...
> 
> (get-compile-time-string) -> "18:35 8/10/2007"

OK. Useful, not doable from within a pure language.
Not a killer application for macros since good alternatives exist, but 
useful anyway.

> You can extended this basic technique arbitrarily far. Consider tools
> which take an XML description of a GUI and generate code for a
> toolkit. You can implement as functions in a library, instead of
> dealing with the hassle of writing an external tool and integrating it
> into your build system.

This kind of stuff is being done in pure functional programs, too. E.g. 
there's a library that will generate heavily optimized C code given the 
specifications of a Fourier Transform. The author reported no troubles 
doing that.
(It would have been no problem generating heavily optimized OCaml or 
Haskell code instead.)

> It can also be used to encapsulate patterns in code.

That's the domain of HOFs in a non-Lisp FPL.

Regards,
Jo
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186864785.369602.79130@k79g2000hse.googlegroups.com>
> Right. All revision control systems that I know about do that.

Right, but RCS's are external tools fraught with all the issues of
external tools. And an RCS would be able to do this particular
example, but cannot do more general ones. Eg: what happens when you
want to create functions based on an external (or even internal)
specification? Say I want to create a specialized parser for a format
from a DTD. You can achieve similar effect using HOFs by interpreting
the DTD at runtime, but you're not necessarily accomplishing the same
thing (ie: interpreting the formats at runtime is liable to be much
slower).

> This kind of stuff is being done in pure functional programs, too. E.g.
> there's a library that will generate heavily optimized C code given the
> specifications of a Fourier Transform. The author reported no troubles
> doing that.
> (It would have been no problem generating heavily optimized OCaml or
> Haskell code instead.)

I could do it in C too. The question is how convenient, integrated,
and transparent it is. There is also the issue of what happens when
you want to interleave your DSL with regular Lisp code. This is where
the lack of integration can really make things confusing and error-
prone (eg: Qt's MOC in C++).

> That's the domain of HOFs in a non-Lisp FPL.

HOF's have costs, both in terms of syntax and in terms of performance.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-49D97B.01060711082007@news-europe.giganews.com>
In article <············@online.de>,
 Joachim Durchholz <··@durchholz.org> wrote:

> Rainer Joswig schrieb:
> > In article <············@online.de>,
> >  Joachim Durchholz <··@durchholz.org> wrote:
> > 
> >> Because it's not syntax that's the difference. You don't need macros if 
> >> you don't need to control when an expression is evaluated.
> >>
> >> Syntactically, both macros and function calls are just S-expressions in 
> >> Lisp. It's the semantics that differs, the time at which the expressions 
> >> are evaluated.
> >> You don't need to make that difference if the semantics of an expression 
> >> is independent of when it is evaluated.
> > 
> > So, macros are not about different evaluation time. Macros
> > are source transforming functions. They can generate
> > arbitrary source. The result of the source transformation
> > (new source) runs at runtime, just as the function would.
> > So the macro expansion is an additional step.
> 
> Yes, I know that,

but your explanation sounded a bit confused:
  'It's the semantics that differs, the time at which the expressions are evaluated.'
In case of the macro, the expression does not get evaluated at all. Another
expression will be evaluated instead - the one generated by the macro
at macro-expansion-time.

> but what does it buy me?


* you can manipulate source code as data
* you can write short cuts for code
* you can write compilers for some source language
* you can use a data-driven coding style
* you can add new syntactic forms
* you can make your code more declarative

The last one is the biggest win, IMHO.

I can write a parser rule like this:

(defrule transfer
  := (DATE "gw [0]ip" "A" WHO SRC DST BYTES PACKETS)
  :build (:form (make-ip-record :time DATE :src SRC :dst DST :bytes BYTES)))

The macro transforms it a compile time into some parsing code.
The source form is completely disconnected from the implementation.

It allows you to do a style of programming that
looks more like configuration.

Some (= many) years ago I was processing call data and generated tables based on the
call events. I wrote a macro to configure the generated tables. Excerpt:


(deftables *log-tables*
  :accessors ((name "Name" name)
              (number "Nummer" number)

            ...
              (incoming-calls-sum "In-Anrufe" ((calls)
                                               (count-if #'(lambda (call)
                                                             (or (not (call-direction call))
                                                                 (eq (call-direction call) :in)))
                                                         calls)))
            ...

              (inside-number "In-Nummer" inside-number)
              (outside-number "Out-Nummer" outside-number)
              (cost "Kosten" cost)
              (units "Einheiten" units)
              (type "Art" type)
              (start-time "Start" ((date) (date-to-string date)))

            ...

           )


  :sorters (...

            (sort-calls-by-inside-number ((items)
                                          (sort-calls-by-number items
                                                                #'call-outside-number
                                                                #'call-inside-number
                                                                #'string<
                                                                #'<
                                                                ""
                                                                0)))
           ...
           )

  :tables ( ...

           (:name table-incoming-calls-1
            :caption "Eingehende Rufe, sortiert nach Nebenstelle"
            :generator #'incoming-calls
            :sorter sort-calls-by-inside-number
            :columns (inside-number outside-number cost units type start-time duration))

   ...  )


So, I have defined a macro which took descriptions for data accessors,
sorting routines and used those to specify tables. The macro called
a code generator, which generated the classes and methods to do this.

When a new run to generate the table data was needed (usually a few times
per month), only the code configuration (using the macro) needed to be changed
in one place, and it generated thousands of lines of code (methods, classes, functions, ...),
based on the provided machinery.

> Let me try a concrete example, straight from Practical Common Lisp by 
> Peter Seibel that somebody was kind enough to direct my attention to.
> 
> WHEN, UNLESS, DOLIST and DOTIMES could all be easily written as 
> higher-order functions. They'd take functions as parameters for the 
> various conditions and bodies, and return a function that, when run, 
> executes the constructed conditional resp. loop.
> 
> To make that smooth, you'd need as little syntactic overhead as 
> possible. Haskell's syntax is minimal enough for that IMHO.
> 
> I'm not sure how much of that argument transfers to more complicated macros.
> 
> Regards,
> Jo

-- 
http://lispm.dyndns.org
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9kb2o$a8c$1@online.de>
Rainer Joswig schrieb:
> In article <············@online.de>,
>  Joachim Durchholz <··@durchholz.org> wrote:
> 
>> Rainer Joswig schrieb:
>>> In article <············@online.de>,
>>>  Joachim Durchholz <··@durchholz.org> wrote:
>>>
>>>> Because it's not syntax that's the difference. You don't need macros if 
>>>> you don't need to control when an expression is evaluated.
>>>>
>>>> Syntactically, both macros and function calls are just S-expressions in 
>>>> Lisp. It's the semantics that differs, the time at which the expressions 
>>>> are evaluated.
>>>> You don't need to make that difference if the semantics of an expression 
>>>> is independent of when it is evaluated.
>>> So, macros are not about different evaluation time. Macros
>>> are source transforming functions. They can generate
>>> arbitrary source. The result of the source transformation
>>> (new source) runs at runtime, just as the function would.
>>> So the macro expansion is an additional step.
>> Yes, I know that,
> 
> but your explanation sounded a bit confused:

The problem is that different people give different reasons what macros 
are good for.

>   'It's the semantics that differs, the time at which the expressions are evaluated.'
> In case of the macro, the expression does not get evaluated at all.

But the macro call gets evaluated, and it is a kind of expression.

 > Another
> expression will be evaluated instead - the one generated by the macro
> at macro-expansion-time.

Exactly.

>> but what does it buy me?
> 
> 
> * you can manipulate source code as data

Can do with sources (code generation).
It's not accessible within the manipulating program, but I do like to 
keep the meta levels separate.

> * you can write short cuts for code

HOFs.

> * you can write compilers for some source language

Parsing libraries.

> * you can use a data-drive coding style

Not sure what that is.

> * you can add new syntactic forms

Not needed.

> * you can make your code more declarative

I'm not 100% sure how that would work out. I suspect it's possible using 
HOFs, but I haven't seen all cases of "make it more declarative" that 
you might mean.

> Some (= many) years ago I was processing call data and generated tables based on the
> call events. I wrote a macro to configure the generated tables. Excerpt:
> 
> 
> (deftables *log-tables*
>   :accessors ((name "Name" name)
>               (number "Nummer" number)
> 
>             ...
>               (incoming-calls-sum "In-Anrufe" ((calls)
>                                                (count-if #'(lambda (call)
>                                                              (or (not (call-direction call))
>                                                                  (eq (call-direction call) :in)))
>                                                          calls)))
>             ...
> 
>               (inside-number "In-Nummer" inside-number)
>               (outside-number "Out-Nummer" outside-number)
>               (cost "Kosten" cost)
>               (units "Einheiten" units)
>               (type "Art" type)
>               (start-time "Start" ((date) (date-to-string date)))
> 
>             ...
> 
>            )
> 
> 
>   :sorters (...
> 
>             (sort-calls-by-inside-number ((items)
>                                           (sort-calls-by-number items
>                                                                 #'call-outside-number
>                                                                 #'call-inside-number
>                                                                 #'string<
>                                                                 #'<
>                                                                 ""
>                                                                 0)))
>            ...
>            )
> 
>   :tables ( ...
> 
>            (:name table-incoming-calls-1
>             :caption "Eingehende Rufe, sortiert nach Nebenstelle"
>             :generator #'incoming-calls
>             :sorter sort-calls-by-inside-number
>             :columns (inside-number outside-number cost units type start-time duration))
> 
>    ...  )
> 
> 
> So, I have defined a macro which took descriptions for data accessors,
> sorting routines and used those to specify tables. The macro called
> a code generator, which generated the classes and methods to do this.

Higher-order programming does just the same: generate functions that do 
the real work.

Regards,
Jo
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186865011.907303.88320@k79g2000hse.googlegroups.com>
> > * you can write compilers for some source language
>
> Parsing libraries.

I don't know a terminal from an identifier, and I totally skipped over
DFAs in my books, but I can still handle writing Lisp macros. The
question, again, is one of lowering the barrier to using a particular
technique.

> > * you can make your code more declarative

Think XAML, without the headache of not being able to interleave code
with declarative specifications.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13canrsnf6bftc1@corp.supernews.com>
Rayiner Hashem wrote:
>> > * you can write compilers for some source language
>>
>> Parsing libraries.
> 
> I don't know a terminal from an identifier, and I totally skipped over
> DFAs in my books, but I can still handle writing Lisp macros. The
> question, again, is one of lowering the barrier to using a particular
> technique.

There are many excellent parsing libraries out there. Many offer static
checking of grammars to some degree. Moreover, they handle arbitrary syntax
rather than just s-exprs.

>> > * you can make your code more declarative
> 
> Think XAML, without the headache of not being able to interleave code
> with declarative specifications.

Haskell already is declarative.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bsevrn07dp7f8@corp.supernews.com>
Rayiner Hashem wrote:
>> > * you can write compilers for some source language
>>
>> Parsing libraries.
> 
> I don't know a terminal from an identifier, and I totally skipped over
> DFAs in my books, but I can still handle writing Lisp macros. The
> question, again, is one of lowering the barrier to using a particular
> technique.

There are many excellent parsing libraries out there. Many offer static
checking of grammars to some degree. Moreover, they handle arbitrary syntax
rather than just s-exprs.

>> > * you can make your code more declarative
> 
> Think XAML, without the headache of not being able to interleave code
> with declarative specifications.

Haskell already is declarative.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186873956.930077.308360@l70g2000hse.googlegroups.com>
> There are many excellent parsing libraries out there. Many offer static
> checking of grammars to some degree. Moreover, they handle arbitrary syntax
> rather than just s-exprs.

I don't understand EBNF, and I have no desire to learn it. I've yet to
encounter something that couldn't be adequately expressed as an sexpr,
or yet to make something important enough to really deserve it's own
syntax...

> Haskell already is declarative.

At a much lower level. I hate to keep using my own examples, but
what's the Haskell equivalent of this code:

(define-encoder mov (dest source)
  ((rm8 r8) (#x88 /rm))
  ((rm32 r32) (#x89 /rm))
  ((rm64 r64) (#x89 /rm))
  ((r8 rm8) (#x8A /r))
  ((r32 rm32) (#x8B /r))
  ((r64 rm64) (#x8B /r))
  ((r8 imm8) (#xB0 +r ib))
  ((r32 imm32) (#xB8 +r id))
  ((rm64 imm32) (#xC7 /0 id))
  ((r64 imm64) (#xB8 +r iq))
  ((rm8 imm8) (#xC6 /0 ib))
  ((rm32 imm32) (#xC7 /0 id)))

This defines a function that takes an assembly instruction sexpr as
input, and emits an encode byte-stream as output. What is the Haskell
equivalent of this code?
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186875171.043023.9320@d55g2000hsg.googlegroups.com>
> This defines a function that takes an assembly instruction sexpr as
> input, and emits an encode byte-stream as output. What is the Haskell
> equivalent of this code?

Just to follow-up. The above is probably not that painful to express
as a pattern-match (except with more syntactic noise), but what's the
equivalent of this:

(defmacro define-type0-encoder (name base subcode)
  (let ((base1 base)
	(base2 (+ base 1))
	(base3 (+ base 2))
	(base4 (+ base 3)))
    `(define-encoder ,name (dest source)
       ((rm8 imm8) (#x80 ,subcode ib))
       ((rm32 imm8) (#x83 ,subcode ib))
       ((rm64 imm8) (#x83 ,subcode ib))
       ((rm32 imm32) (#x81 ,subcode id))
       ((rm64 imm32) (#x81 ,subcode id))
       ((rm8 r8) (,base1 /rm))
       ((rm32 r32) (,base2 /rm))
       ((rm64 r64) (,base2 /rm))
       ((r8 rm8) (,base3 /r))
       ((r32 rm32) (,base4 /r))
       ((r64 rm64) (,base4 /r)))))

(define-type0-encoder add #x00 /0)
(define-type0-encoder adc #x10 /2)
(define-type0-encoder and #x20 /4)
(define-type0-encoder xor #x30 /6)
(define-type0-encoder or  #x08 /1)
(define-type0-encoder sbb #x18 /3)
(define-type0-encoder sub #x28 /5)
(define-type0-encoder cmp #x38 /7)

The last 8 lines expand into some 80 lines worth of pattern matches,
which themselves expand into some 500 lines worth of Lisp code.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13buj4l3bbskg6a@corp.supernews.com>
Rayiner Hashem wrote:
>> This defines a function that takes an assembly instruction sexpr as
>> input, and emits an encode byte-stream as output. What is the Haskell
>> equivalent of this code?
> 
> Just to follow-up. The above is probably not that painful to express
> as a pattern-match (except with more syntactic noise

You might like to look at the results before drawing your conclusion.

> but what the equivalent of this:

Note that I am unable to get this to work in SBCL and have no context or
examples.

> (defmacro define-type0-encoder (name base subcode)
>   (let ((base1 base)
> (base2 (+ base 1))
> (base3 (+ base 2))
> (base4 (+ base 3)))
>     `(define-encoder ,name (dest source)
>        ((rm8 imm8) (#x80 ,subcode ib))
>        ((rm32 imm8) (#x83 ,subcode ib))
>        ((rm64 imm8) (#x83 ,subcode ib))
>        ((rm32 imm32) (#x81 ,subcode id))
>        ((rm64 imm32) (#x81 ,subcode id))
>        ((rm8 r8) (,base1 /rm))
>        ((rm32 r32) (,base2 /rm))
>        ((rm64 r64) (,base2 /rm))
>        ((r8 rm8) (,base3 /r))
>        ((r32 rm32) (,base4 /r))
>        ((r64 rm64) (,base4 /r)))))

This looks like a flat pattern match that is incomplete, maybe something
like this:

# let define_type0_encoder base subcode = function
    | `rm8, `imm8 -> [`C 0x80; `I subcode; `ib]
    | `rm32, `imm8 -> [`C 0x83; `I subcode; `ib]
    | `rm64, `imm8 -> [`C 0x83; `I subcode; `ib]
    | `rm32, `imm32 -> [`C 0x81; `I subcode; `id]
    | `rm64, `imm32 -> [`C 0x81; `I subcode; `id]
    | `rm8, `r8 -> [`I base; `rm]
    | `rm32, `r32 -> [`I(base+1); `rm]
    | `rm64, `r64 -> [`I(base+1); `rm]
    | `rm8, `rm8 -> [`I(base+2); `rm]
    | `rm32, `rm32 -> [`I(base+3); `rm]
    | `rm64, `rm64 -> [`I(base+3); `rm];;
Warning P: this pattern-matching is not exhaustive.
Here is an example of a value that is not matched:
(`rm64, (`r8|`r32|`rm8|`rm32))
val define_type0_encoder :
  int ->
  int ->
  [< `rm32 | `rm64 | `rm8 ] *
  [< `imm32 | `imm8 | `r32 | `r64 | `r8 | `rm32 | `rm64 | `rm8 ] ->
  [> `C of int | `I of int | `ib | `id | `rm ] list = <fun>

I suspect ib, id and rm in the output are just ints, so the output could be
encoded more efficiently as an int list or maybe a char list.

> (define-type0-encoder add #x00 /0)
> (define-type0-encoder adc #x10 /2)
> (define-type0-encoder and #x20 /4)
> (define-type0-encoder xor #x30 /6)
> (define-type0-encoder or  #x08 /1)
> (define-type0-encoder sbb #x18 /3)
> (define-type0-encoder sub #x28 /5)
> (define-type0-encoder cmp #x38 /7)

Just partial applications of define-type0-encoder function:

# let _add, _adc, _and, _xor, _or, _sbb, _sub, _cmp =
    let f = define_type0_encoder in
    f 0x00 0, f 0x10 2, f 0x20 4, f 0x30 6,
    f 0x08 1, f 0x18 3, f 0x28 5, f 0x38 7;;
val _add :
  _[< `rm32 | `rm64 | `rm8 ] *
  _[< `imm32 | `imm8 | `r32 | `r64 | `r8 | `rm32 | `rm64 | `rm8 ] ->
  [> `C of int | `I of int | `ib | `id | `rm ] list = <fun>
val _adc :
  _[< `rm32 | `rm64 | `rm8 ] *
  _[< `imm32 | `imm8 | `r32 | `r64 | `r8 | `rm32 | `rm64 | `rm8 ] ->
  [> `C of int | `I of int | `ib | `id | `rm ] list = <fun>
val _and :
  _[< `rm32 | `rm64 | `rm8 ] *
  _[< `imm32 | `imm8 | `r32 | `r64 | `r8 | `rm32 | `rm64 | `rm8 ] ->
  [> `C of int | `I of int | `ib | `id | `rm ] list = <fun>
val _xor :
  _[< `rm32 | `rm64 | `rm8 ] *
  _[< `imm32 | `imm8 | `r32 | `r64 | `r8 | `rm32 | `rm64 | `rm8 ] ->
  [> `C of int | `I of int | `ib | `id | `rm ] list = <fun>
val _or :
  _[< `rm32 | `rm64 | `rm8 ] *
  _[< `imm32 | `imm8 | `r32 | `r64 | `r8 | `rm32 | `rm64 | `rm8 ] ->
  [> `C of int | `I of int | `ib | `id | `rm ] list = <fun>
val _sbb :
  _[< `rm32 | `rm64 | `rm8 ] *
  _[< `imm32 | `imm8 | `r32 | `r64 | `r8 | `rm32 | `rm64 | `rm8 ] ->
  [> `C of int | `I of int | `ib | `id | `rm ] list = <fun>
val _sub :
  _[< `rm32 | `rm64 | `rm8 ] *
  _[< `imm32 | `imm8 | `r32 | `r64 | `r8 | `rm32 | `rm64 | `rm8 ] ->
  [> `C of int | `I of int | `ib | `id | `rm ] list = <fun>
val _cmp :
  _[< `rm32 | `rm64 | `rm8 ] *
  _[< `imm32 | `imm8 | `r32 | `r64 | `r8 | `rm32 | `rm64 | `rm8 ] ->
  [> `C of int | `I of int | `ib | `id | `rm ] list = <fun>

> The last 8 lines expand into some 80 lines worth of pattern matches,
> which themselves expand into some 500 lines worth of Lisp code.

This did not require macros or even higher-order functions.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186944863.242304.38750@57g2000hsv.googlegroups.com>
> This looks like a flat pattern match that is incomplete, maybe something
> like this:

LOL. That's because the x86 instruction set is incomplete!

Anyway, you misunderstood the problem. I'm not just taking symbols and
returning symbols. As I said, I'm taking instructions and generating a
function that encodes that instruction to bytes. Ie:

((rm64 imm32)) is a pattern that matches any 64-bit register or 64-bit
memory operand expression in the destination position and a 32-bit
immediate integer in the source position. So it would match the list:

(:mov :rax 8) ; rax is a 64-bit register, the number "8" fits into a
32-bit integer

and

(:mov (:word :rbp :rcx 8 130) 112) ; the inner list is a memory
operand expression

but not

(:mov :ebx 50) ; ebx is a 32-bit register, so doesn't match the "rm64"
pattern

This makes a bit more sense if you know Intel assembler, but you get
the idea.

The expansion (#xC7 /0 id) is not some symbols, but describes how to
encode an instruction. For this particular one, I want to generate a
function that given appropriate dest and source expressions (like
those above), will generate a byte vector encoding that instruction.
Eg:

; take "dest" and "source" expressions as parameters
#xC7 -> set opcode to #xC7
/0 -> set subcode field to 0, set destination to byte-encoded form of
"dest"
id -> set immediate field to 32-bit immediate operand in "source"

The result of the macro is a function that takes an instruction
expression as a parameter and performs the above operations to
generate a byte-encoded form of the instruction.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bvk6e2s4a8sd6@corp.supernews.com>
Rayiner Hashem wrote:
> ((rm64 imm32)) is a pattern that matches any 64-bit register or 64-bit
> memory operand expression in the destination position and an 32-bit
> immediate integer in the sourceposition. So it would match the list:
> 
> (:mov :rax 8) ; rax is a 64-bit register, the number "8" fits into a
> 32-bit integer
> 
> and
> 
> (:mov (:word :rpb :rcx 8 130) 112) ; the inner list is a memory
> operand expression
> 
> but not
> 
> (:mov :ebx 50) ; ebx is a 32-bit register, so doesn't match the "rm64"
> pattern
> 
> This makes a bit more sense if you know Intel assembler, but you get
> the idea.

There are various ways to implement this. You might write a "type_of"
function to extract the run-time type of an expression and then match over
the types:

  match type_of dest, type_of src with
  | (R8|M8), Imm8 -> ...
  ...

> The expansion (#xC7 /0 id)

Where did this come from? I don't believe I have seen it before.

> is not some symbols, but describes how to encode an instruction.

Without knowing the set of possible values I cannot define a good static
type to implement this but I suspect it is just a function call with the
arguments:

  0xC7 0 id

> For this particular one, I want to generate a 
> function that given appropriate dest and source expressions (like
> those above),

So the first two arguments would be dest and src.

> will generate a byte vector encoding that instruction. 
> Eg:
> 
> ; take "dest" and "source" expressions as parameters
> #xC7 -> set opcode to #xC7
> /0 -> set subcode field to 0, set destination to byte-encoded form of
> "dest"
> id -> set immediate field to 32-bit immediate operand in "source"

There are lots of different ways to implement this and choosing the best
depends upon details of the program that I do not yet know.

This might become:

  let f3 = emit3 dest src in
  match type_of dest, type_of src with
  | (R8|M8), Imm8 -> f3 0xC7 0 id
  ...

> The result of the macro is a function that takes an instruction
> expression as a parameter and performs the above operations to
> generate a byte-encoded form of the instruction.

I would just write this as a curried function that initially took the
parameters of the macro and then the parameters of the function that your
macro generates.

The type of the resulting function might be something like:

  val encode : base -> subcode -> dest -> src -> byte array

You can implement this curried version in Lisp as well, of course.

Moreover, it looks like you are using several sum types and a couple of
product types that would all map onto static types very well. I expect
static checking would buy you a lot here.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13buh5oic6crl66@corp.supernews.com>
Rayiner Hashem wrote:
> (define-encoder mov (dest source)
>   ((rm8 r8) (#x88 /rm))
>   ((rm32 r32) (#x89 /rm))
>   ((rm64 r64) (#x89 /rm))
>   ((r8 rm8) (#x8A /r))
>   ((r32 rm32) (#x8B /r))
>   ((r64 rm64) (#x8B /r))
>   ((r8 imm8) (#xB0 +r ib))
>   ((r32 imm32) (#xB8 +r id))
>   ((rm64 imm32) (#xC7 /0 id))
>   ((r64 imm64) (#xB8 +r iq))
>   ((rm8 imm8) (#xC6 /0 ib))
>   ((rm32 imm32) (#xC7 /0 id)))

A special case of pattern matching.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186943860.348548.106450@22g2000hsm.googlegroups.com>
> > (define-encoder mov (dest source)
> >   ((rm8 r8) (#x88 /rm))
> >   ((rm32 r32) (#x89 /rm))
> >   ((rm64 r64) (#x89 /rm))
> >   ((r8 rm8) (#x8A /r))
> >   ((r32 rm32) (#x8B /r))
> >   ((r64 rm64) (#x8B /r))
> >   ((r8 imm8) (#xB0 +r ib))
> >   ((r32 imm32) (#xB8 +r id))
> >   ((rm64 imm32) (#xC7 /0 id))
> >   ((r64 imm64) (#xB8 +r iq))
> >   ((rm8 imm8) (#xC6 /0 ib))
> >   ((rm32 imm32) (#xC7 /0 id)))
>
> A special case of pattern matching.

The pattern-matching is the easy part. The hard part is expanding the
rule on the right-hand side. Consider the line: ((r64 imm64) (#xB8 +r
iq))

This expands to:

((AND (OPERAND-MATCHES? DEST 'R64) (OPERAND-MATCHES? SOURCE 'IMM64))
    (LET ((#:G1732 (MAKE-OPRINFO)) (#:G1733 (NEW-OCINFO)))
      (SETF (OCINFO-OVERRIDE? #:G1733) T)
      (ADD-REG-OPERAND #:G1732 DEST 'OP)
      (ADD-IMMEDIATE-OPERAND #:G1732 SOURCE 8 ':ABS)
      (VECTOR-PUSH-EXTEND 184 (OCINFO-OPCODES #:G1733))
      (VALUES #:G1733 #:G1732)))

The first line implements the pattern-match. In the third line, the
macro notices that the pattern contains a 64-bit operand, and
generated code to mark the instruction as needing a 64-bit override
prefix. The third and fourth lines add operands based on both the left
and right-hand sides.

Without macros, you could encode the whole sequence of operations in
the expansion into a function, and call that, but there are a ton of
different combinations to encode.

Even that doesn't address the other features of the macro-based
system. In addition to generating the encoders, it stores the patterns
and right-hand sides so a random-tester can use them to generate test
cases for the patterns. This falls out almost for free in the macro-
based system, but I really can't see how you'd do it with HOFs and
pattern-matching.

Ultimately, I have a really hard time seeing an equivalent solution
without macros. I think the syntactic overhead of HOFs will just kill
you here. I've got patterns for 270 x86 instructions, and there are
probably 350+ overall. Each of these instructions has 1-10 forms, and
writing all those matches manually would get really error-prone. Using
a syntax very similar to the Intel manuals helps enormously, making a
tedious process very straightforward and hard to muck up.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bvicuagrfe6d5@corp.supernews.com>
Rayiner Hashem wrote:
>> > (define-encoder mov (dest source)
>> >   ((rm8 r8) (#x88 /rm))
>> >   ((rm32 r32) (#x89 /rm))
>> >   ((rm64 r64) (#x89 /rm))
>> >   ((r8 rm8) (#x8A /r))
>> >   ((r32 rm32) (#x8B /r))
>> >   ((r64 rm64) (#x8B /r))
>> >   ((r8 imm8) (#xB0 +r ib))
>> >   ((r32 imm32) (#xB8 +r id))
>> >   ((rm64 imm32) (#xC7 /0 id))
>> >   ((r64 imm64) (#xB8 +r iq))
>> >   ((rm8 imm8) (#xC6 /0 ib))
>> >   ((rm32 imm32) (#xC7 /0 id)))
>>
>> A special case of pattern matching.
> 
> The pattern-matching is the easy part. The hard part is expanding the
> rule on the right-hand side. Consider the line: ((r64 imm64) (#xB8 +r
> iq))
> 
> This expands to:
> 
> ((AND (OPERAND-MATCHES? DEST 'R64) (OPERAND-MATCHES? SOURCE 'IMM64))
>     (LET ((#:G1732 (MAKE-OPRINFO)) (#:G1733 (NEW-OCINFO)))
>       (SETF (OCINFO-OVERRIDE? #:G1733) T)
>       (ADD-REG-OPERAND #:G1732 DEST 'OP)
>       (ADD-IMMEDIATE-OPERAND #:G1732 SOURCE 8 ':ABS)
>       (VECTOR-PUSH-EXTEND 184 (OCINFO-OPCODES #:G1733))
>       (VALUES #:G1733 #:G1732)))

I assume I haven't seen the code that does this?

> The first line implements the pattern-match. In the third line, the
> macro notices that the pattern contains a 64-bit operand, and
> generated code to mark the instruction as needing a 64-bit override
> prefix. The third and fourth lines add operands based on both the left
> and right-hand sides.

Ok.

> Without macros, you could encode the whole sequence of operations in
> the expansion into a function, and call that, but there are a ton of
> different combinations to encode.

You should be able to parameterise your function as you have with the macro.
The only difference is that it gets evaluated at run-time.

> Even that doesn't address the other features of the macro-based
> system. In addition to generating the encoders, it stores the patterns
> and right-hand sides so a random-tester can use them to generate test
> cases for the patterns. This falls out almost for free in the macro-
> based system, but I really can't see how you'd do it with HOFs and
> pattern-matching.

The approaches would differ here. With a static type system you would design
the types used to represent the values passed to each function such that
they convey as many constraints to the type checker as possible. You would
probably only unit test the whole code emitter: individual functions are
rarely unit tested in ML.

> Ultimately, I have a really hard time seeing an equivalent solution
> without macros.

Macros just rewrite terms and you can easily write a term rewriter as a
function.

> I think the syntactic overhead of HOFs will just kill you here.

Currying should alleviate that and pattern matching will greatly reduce
syntactic overhead compared to Lisp's macros.

> I've got patterns for 270 x86 instructions, and there are 
> probably 350+ overall. Each of these instructions has 1-10 forms, and
> writing all those matches manually would get really error-prone.

You would not expand the code by hand, of course.

> Using 
> a syntax very similar to the Intel manuals helps enormously, making a
> tedious process very straightforward and hard to muck up.

OCaml's i386 code emitter is only 907 LOC.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186976999.409192.100260@w3g2000hsg.googlegroups.com>
> I assume I haven't seen the code that does this?

It's about ~150 lines of very straight-forward code that does the
obvious transformation.

> You should be able to parameterise your function as you have with the macro.
> The only difference is that it gets evaluated at run-time.

Right, you could write a function that took an assembly expression and
a template expressed as lists and did the interpretation at run-time.
You'd essentially have a table-driven assembler, which is a perfectly
adequate solution, but not the one I wanted to build. In general, you
can push direct assemblers a lot further performance-wise than table-
driven ones.

> The approaches would differ here. With a static type system you would design
> the types used to represent the values passed to each function such that
> they convey as many constraints to the type checker as possible.

I'd be floored if you could express the machine constraints of x86 in
any useful way in the ML type system.

> You would
> probably only unit test the whole code emitter: individual functions are
> rarely unit tested in ML.

You need to test the whole instruction set supported by the assembler.
It's very easy to make mistakes that seem perfectly fine to the type-
checker, such as making a typo in an op-code, or specifying an
instruction form that's not actually supported by the ISA. The random
tester interprets the source patterns to generate random source code,
and then compares the assembled machine code against the output of an
external assembler. This falls out easily if you build a table-driven
assembler, but again, that's not what I wanted.

> Macros just rewrite terms and you can easily write a term rewriter as a
> function.

A macro *is* just a function. And yes, you can write a term rewriter
in ML, the question is: what does the system let you rewrite, and what
does it let you do with the output? The Lisp macro system lets your
rewriter operate on source code, and lets you compile the result.
That's where the utility comes from.

> Currying should alleviate that and pattern matching will greatly reduce
> syntactic overhead compared to Lisp's macros.

There is almost no syntactic overhead in the example I presented. Get
rid of the parens, and you're left with the specification language
Intel uses in their manuals. HOFs could only do worse here.

> OCaml's i386 code emitter is only 907 LOC.

That's nice, but, uh, irrelevant. O'Caml's code emitter a) doesn't
include an assembler, and b) only supports enough of the instruction
set to support the compiler. Oh, and I find it entertaining that they
don't even try to represent the i386 ISA in some sort of typed
framework. They just build the assembly source fragments with
strings...
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c054rrqmnj0f1@corp.supernews.com>
Rayiner Hashem wrote:
>> I assume I haven't seen the code that does this?
> 
> It's about ~150 lines of very straight-forward code that does the
> obvious transformation.

Ok. I'm not familiar with i386 assembler so this is far from obvious to
me. :-)

>> You should be able to parameterise your function as you have with the
>> macro. The only difference is that it gets evaluated at run-time.
> 
> Right, you could write a function that took an assembly expression and
> a template expressed as lists and did the interpretation at run-time.
> You'd essentially have a table-driven assembler, which is a perfectly
> adequate solution, but not the one I wanted to build. In general, you
> can push direct assemblers a lot further performance-wise than table-
> driven ones.

Learning from the symbolic rewriter benchmark, optimizing your pattern
matches should hugely improve the performance of your program. This is
probably as beneficial as compilation itself.

>> Macros just rewrite terms and you can easily write a term rewriter as a
>> function.
> 
> A macro *is* just a function. And yes, you can write a term rewriter
> in ML, the question is: what does the system let you rewrite, and what
> does it let you do with the output? The Lisp macro system let's your
> rewriter operate on source code, and let's you compile the result.
> That's where the utility comes from.

I would certainly start by writing an interpreter and profiling it if
performance turned out to be an issue. As for the automated testing you're
doing, you can do that by generating random values from OCaml types.

>> Currying should alleviate that and pattern matching will greatly reduce
>> syntactic overhead compared to Lisp's macros.
> 
> There is almost no syntactic overhead in the example I presented.

In the example that you chose, yes. However, most of the code
in "encoders.lisp" is written very redundantly. For example:

(defmacro define-x-encoder (name &rest opcodes)
  `(define-encoder ,name (dest source)
     ((x x) (,@opcodes /r))))

(defmacro define-xm128-encoder (name &rest opcodes)
  `(define-encoder ,name (dest source)
     ((x xm128) (,@opcodes /r))))

(defmacro define-xm64-encoder (name &rest opcodes)
  `(define-encoder ,name (dest source)
     ((x xm64) (,@opcodes /r))))

(defmacro define-xm32-encoder (name &rest opcodes)
  `(define-encoder ,name (dest source)
     ((x xm32) (,@opcodes /r))))

(defmacro define-cmp-encoder (name &rest opcodes)
  `(define-encoder ,name (dest source cmp)
     ((x xm128 imm8) (,@opcodes /r ib))))

These would be one line each in a pattern match. Or here:

(define-xm128-encoder addpd #x66 #x0F #x58)
(define-xm128-encoder addps #x0F #x58)
(define-xm128-encoder addsd #xF2 #x0F #x58)
(define-xm128-encoder addss #xF3 #x0F #x58)

(define-xm128-encoder addsubpd #x66 #x0F #xD0)
(define-xm128-encoder addsubps #xF2 #x0F #xD0)

(define-xm128-encoder andnpd #x66 #x0F #x55)
(define-xm128-encoder andnps #x0F #x55)
(define-xm128-encoder andpd #x66 #x0F #x54)
(define-xm128-encoder andps #x0F #x54)

You could construct all simultaneously as I did before.

> Get  
> rid of the parens, and you're left with the specification language
> Intel uses in their manuals.

You've also got "`", "'", "," and ",@".

> HOFs could only do worse here. 

I actually think HOFs are irrelevant here: I haven't seen anything that
needs to be parameterized over a function.

Now that I come to think of it, I'd be surprised if most of this work hadn't
been done before in a way that can be reused.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xsl6nv8rh.fsf@ruckus.brouhaha.com>
Jon Harrop <···@ffconsultancy.com> writes:
> > It's about ~150 lines of very straight-forward code that does the
> > obvious transformation.
> Ok. I'm not familiar with i386 assembler so this is far from obvious to me. :-)

I used to understand the x86 pretty well, and I haven't been able to make sense
of this thread either.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <akabsv1mcb.fsf@hod.lan.m-e-leypold.de>
Paul Rubin wrote:

> Jon Harrop <···@ffconsultancy.com> writes:
>> > It's about ~150 lines of very straight-forward code that does the
>> > obvious transformation.
>> Ok. I'm not familiar with i386 assembler so this is far from obvious to me. :-)
>
> I used to understand the x86 pretty well, and I haven't been able to make sense
> of this thread either.

As I understand it (skimming over Rayiner's explanations) it's all
about an example whose <whatever> presumably cannot be caught by an
ML-style (Hindley-Milner) type system. Considering that in HM-type
systems I can always use parametric polymorphism or fall back on "the
big union type" and emulate dynamic types this way, I'd be pretty
surprised if this were so.

Actually I think that we have another case of trying to use the type
system to "encode" a specification completely (!). Since I can't do
that with dynamic types at all, I wonder why I should suddenly be
required to do so with static type system.

At worst case we have a case here where the static type system isn't
useful with regard to ensuring adherence to specifications. But in
this case a single example is completely useless, because it can't
demonstrate that this indeed would be the general case. It leaves me
confused. 

(And now, please, the obvious reply ...)

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c4bt4r8e3ejc6@corp.supernews.com>
Markus E.L. 2 wrote:
> As I understand it (skimming over Rayiners eyplanations) iat's all
> about an example who's <whatever> presumably cannot be caught by a
> ML-style (Hindley-Milner) type system. Considering that in HM-type
> systems I can always use parametric polymorphism or fall back on "the
> big union type" and emulate dynmic types this way, I'd be pretty
> surprised if this were so.

I was going to say that you can perform static tests on overlapping sets of
union types in OCaml using polymorphic variants. I originally thought that
RM meant R|M but it turns out that it is a separate constructor. So you can
probably encode all of the static type information in ML's type system: you
don't need anything fancy in this case.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13hcn1emqd.fsf@hod.lan.m-e-leypold.de>
Jon Harrop wrote:

> Markus E.L. 2 wrote:
>> As I understand it (skimming over Rayiners eyplanations) iat's all
>> about an example who's <whatever> presumably cannot be caught by a
>> ML-style (Hindley-Milner) type system. Considering that in HM-type
>> systems I can always use parametric polymorphism or fall back on "the
>> big union type" and emulate dynmic types this way, I'd be pretty
>> surprised if this were so.
>
> I was going to say that you can perform static tests on overlapping sets of
> union types in OCaml using polymorphic variants. I originally thought that
> RM meant R|M but it turns out that it is a separate constructor. So you can

You lost me there :-) at R|M vs. RM?

> probably encode all of the static type information in ML's type
> system: you need anything fancy in this case.

In _this_ case, yes. Thant, who has been claiming in another
subthread around the corner that static typing can completely
integrate dynamic typing (and as I seem to have suggested, but that is
not totally right (I hope)), is not quite right though. Falling back
to the big union type helps in this case and probably a lot of other
cases, but is, strictly spoken not equivalent to the dynamic types of
Lisp. You can extent the emulation of dynamic typing in ML by union
types to completely cover the all dynamic types of Lisp, but then
something interesting happens ...

I won't give the solution now (and I might admittedly be completely
mistaken, since it's only something I have been thinking about two
days ago when showering :-) -- the need to emulate dynamic types in a
statically typed language arises more rarely than some people here
seem to think :-)), since I want to see whether someone of the
dynamic-types-a-superior-faction can spot the point (instead of
denying the power of the big union type altogether which shows that he
or she hasn't understood it). IMHO the dynamic typing fan crowd
already dropped the ball when trying to find the right argument(s)
regarding the attempt to "run badly typed programs".

I admit to a certain amount of malice on my side (owing to the insults
I already had to swallow) that I don't deliver them the argument they
might need so urgently to make their case. Let me see whether they
find it by themselves if they are so good as they style themselves.

Regards -- markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c7k71h02jnq1d@corp.supernews.com>
Markus E.L. 2 wrote:
> Jon Harrop wrote:
>> I was going to say that you can perform static tests on overlapping sets
>> of union types in OCaml using polymorphic variants. I originally thought
>> that RM meant R|M but it turns out that it is a separate constructor. So
>> you can
> 
> You lost me there :-) at R|M vs. RM?

Rayiner's code contained a match case along the lines of:

  ((rm32 imm32) (...))

I assumed the rm32 meant either r32 or m32 but, in fact, it just means rm32.
So there is no need for the or-pattern. I had written:

  | (R32|M32), Imm32 -> ...

but that can just be:

  | RM32, Imm32 -> ...

This is good for Rayiner, because the fewer advanced pattern matching
constructs are necessary the less need there is to use a pattern matcher in
the first place. I'd still expect the OCaml to be several times faster
though.

>> probably encode all of the static type information in ML's type
>> system: you need anything fancy in this case.
> 
> In _this_ case, yes. Thant, who has been claiming in a another
> subthread around the corner that static typing can completely
> integrate dynamic typing (and as I seem to have suggested, but that is
> not totally right (I hope)), is not quite right though. Falling back
> to the big union type helps in this case and probably a lot of other
> cases, but is, strictly spoken not equivalent to the dynamic types of
> Lisp. You can extent the emulation of dynamic typing in ML by union
> types to completely cover the all dynamic types of Lisp, but then
> something interesting happens ...

An important difference is the ability to express both open and closed sum
types. Surprisingly few languages can do that and OCaml is one of them.

> I won't give the solution now (and I might admittedly be completely
> mistaken, since it's only something I have been thinking about two
> days ago when showering :-) -- the need to emulate dynamic types in a
> statically typed language arises more rarely than some people here
> seem to think :-)), since I want to see wether someone of the
> dynamic-types-a-superior-faction can spot the point (instead of
> denying the power of the big union type altogether which shows that he
> or she hasn't understood it). IMHO the dynamic typing fan crowd
> already dropped the ball when trying to find the right argument(s)
> regarding the attempt to "run badly typed programs".

Yes. I was expecting that to go down the:

  "but you can't even write the ubiquitously-useful polymorphically
recursive functions directly in OCaml."

but, for whatever reason, they chose:

  "I want to feed bad code into my compiler and work on the assumption that
it has no effect on everything else."

for some subjective notion of "bad code" that apparently excludes lexical
and grammatical errors but includes type errors. I know exactly where
they're coming from because this is just what I used to do.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <iybqd8f0tj.fsf@hod.lan.m-e-leypold.de>
Jon Harrop wrote:

> Markus E.L. 2 wrote:
>> Jon Harrop wrote:
>>> I was going to say that you can perform static tests on overlapping sets
>>> of union types in OCaml using polymorphic variants. I originally thought
>>> that RM meant R|M but it turns out that it is a separate constructor. So
>>> you can
>> 
>> You lost me there :-) at R|M vs. RM?
>
> Rayiners code contained a match case along the lines of:
>
>   ((rm32 imm32) (...))
>
> I assumed the rm32 meant either r32 or m32 but, in fact, it just means rm32.
> So there is no need for the or-pattern. I had written:
>
>   | (R32|M32), Imm32 -> ...
>
> but that can just be:
>
>   | RM32, Imm32 -> ...
>
> This is good for Rayiner, because the fewer advanced pattern matching
> constructs are necessary the less need there is to use a pattern matcher in
> the first place. I'd still expect the OCaml to be several times faster
> though.
>
>>> probably encode all of the static type information in ML's type
>>> system: you need anything fancy in this case.
>> 
>> In _this_ case, yes. Thant, who has been claiming in a another
>> subthread around the corner that static typing can completely
>> integrate dynamic typing (and as I seem to have suggested, but that is
>> not totally right (I hope)), is not quite right though. Falling back
>> to the big union type helps in this case and probably a lot of other
>> cases, but is, strictly spoken not equivalent to the dynamic types of
>> Lisp. You can extent the emulation of dynamic typing in ML by union
>> types to completely cover the all dynamic types of Lisp, but then
>> something interesting happens ...
>
> An important difference is the ability to express both open and closed sum
> types. Surprisingly few languages can do that and OCaml is one of them.
>
>> I won't give the solution now (and I might admittedly be completely
>> mistaken, since it's only something I have been thinking about two
>> days ago when showering :-) -- the need to emulate dynamic types in a
>> statically typed language arises more rarely than some people here
>> seem to think :-)), since I want to see wether someone of the
>> dynamic-types-a-superior-faction can spot the point (instead of
>> denying the power of the big union type altogether which shows that he
>> or she hasn't understood it). IMHO the dynamic typing fan crowd
>> already dropped the ball when trying to find the right argument(s)
>> regarding the attempt to "run badly typed programs".
>
> Yes. I was expecting that to go down the:
>
>   "but you can't even write the ubiquitously-useful polymorphically
> recursive functions directly in OCaml."

:-). So would I. 

>
> but, for whatever reason, they chose:
>
>   "I want to feed bad code into my compiler and work on the assumption that
> it has no effect on everything else."

I'd have expected the phrase "extending semantics" here and then a
discussion whether and when that is possible could have begun. Actually
I think it's not impossible to write such a "tolerant" compiler by
translating OCaml syntax schematically into Scheme or Lisp (without
typing at all) and see what happens. I think this can be done in a way
that the resulting Lisp code has the semantics of the Ocaml program
(if that is well typed) and that a number of the badly typed cases
would also run and produce, well, interesting results. But why anybody
would want something like this is still beyond me.

> for some subjective notion of "bad code" that apparently excludes lexical
> and grammatical errors but includes type errors. 

:-)

> I know exactly where they're coming from because this is just what I
> used to do.

:-).

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c7uhhsr8nmn98@corp.supernews.com>
Markus E.L. 2 wrote:
> I'd have expected the phrase "extending semantics" here and then a
> discussion wether and when that is possible could have begun. Actually
> I think it's not impossible to write such a "tolerant" compiler by
> translating OCaml syntax schematically into Scheme or Lisp (without
> typing at all) and see what happens. I think this can be done in a way
> that the resulting Lisp code has the semantics of the Ocaml program
> (if that is well typed) and that a number of the badly typed cases
> would also run and produce, well, interesting results. But why anybody
> would want something like this is still beyond me.

That is actually done a lot but as a learning exercise and not because the
result is useful. There is an implementation for a cut-down,
dynamically-typed OCaml here, for example:

  http://www.ffconsultancy.com/ocaml/benefits/interpreter.html

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187012167.238674.245840@22g2000hsm.googlegroups.com>
> You've also got "`", "'", "," and ·@".
>
> > HOFs could only do worse here.
>
> I actually think HOFs are irrelevant here: I haven't seen anything that
> needs to be parameterized over a function.

The macro definitions like define-*-encoder, and define-type-*-encoder
are meta-patterns for instructions with the same basic shape, but
different sets of values for the RHS. Each invocation of them (and
there are hundreds in all) expands to a full pattern match. As far as
I can tell the only way to express this without macros would be to
write them like this:

function define-type-0-encoder (base)
    function (dest source)
      match type-of(dest), type-of(source)
        ...
        | rm8, r8 -> emit base /rm
        | rm32, r32 -> emit base + 1 /rm
        | rm64, r64 -> emit base + 1 /rm
        | r8, rm8 -> emit base + 2 / r
        ...

This would also let you write an equivalent for define-jcc-encoders,
which generates a family of 16 functions whose names and opcodes are
given by the iteration variables of a loop.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bvke863u3f8d7@corp.supernews.com>
Jon Harrop wrote:
> Rayiner Hashem wrote:
>>> > (define-encoder mov (dest source)
>>> >   ((rm8 r8) (#x88 /rm))
>>> >   ((rm32 r32) (#x89 /rm))
>>> >   ((rm64 r64) (#x89 /rm))
>>> >   ((r8 rm8) (#x8A /r))
>>> >   ((r32 rm32) (#x8B /r))
>>> >   ((r64 rm64) (#x8B /r))
>>> >   ((r8 imm8) (#xB0 +r ib))
>>> >   ((r32 imm32) (#xB8 +r id))
>>> >   ((rm64 imm32) (#xC7 /0 id))
>>> >   ((r64 imm64) (#xB8 +r iq))
>>> >   ((rm8 imm8) (#xC6 /0 ib))
>>> >   ((rm32 imm32) (#xC7 /0 id)))
>
> Currying should alleviate that and pattern matching will greatly reduce
> syntactic overhead compared to Lisp's macros.

Incidentally, your code above is slightly redundant in a way that a pattern
matcher with or-patterns can factor. Specifically, rules are sometimes
repeated:

>>> >   ((rm32 r32) (#x89 /rm))
>>> >   ((rm64 r64) (#x89 /rm))

>>> >   ((r32 rm32) (#x8B /r))
>>> >   ((r64 rm64) (#x8B /r))

These could be written:

  | (R32|M32), R32 | (R64|M64), R64 -> emit2 0x89 rm

  | R32, (R32|M32) | R64, (R64|M64) -> emit2 0x8B r

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186977595.353380.293550@d55g2000hsg.googlegroups.com>
> Incidentally, your code above is slightly redundant in a way that a pattern
> matcher with or-patterns can factor. Specifically, rules are sometimes
> repeated:
>
> >>> >   ((rm32 r32) (#x89 /rm))
> >>> >   ((rm64 r64) (#x89 /rm))
> >>> >   ((r32 rm32) (#x8B /r))
> >>> >   ((r64 rm64) (#x8B /r))
>
> These could be written:
>
>   | (R32|M32), R32 | (R64|M64), R64 -> emit2 0x89 rm
>
>   | R32, (R32|M32) | R64, (R64|M64) -> emit2 0x8B r

The patterns are not the same even though the right-hand side is the
same. When a 64-bit operand is present, the code needs to emit a
prefix byte in the instruction stream. The macro-expander looks for 64-
bit specifiers in the left-hand-side of the pattern, and generates the
encoder appropriately. Obviously, you couldn't have known this without
the code.

Incidentally, you seem to have the general idea correct, and your
solution would work. However, it's not the solution I wanted to
express, specifically, your "emit2" function is being customized by
the expansion commands (right hand side of the pattern) at run-time,
and what I wanted was a function customized at compile-time. The
generated machine code will be the same, but the two implementations
describe different types of assemblers, specifically direct versus
table-driven.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c01j43gltute6@corp.supernews.com>
Rayiner Hashem wrote:
>>   | (R32|M32), R32 | (R64|M64), R64 -> emit2 0x89 rm
>>
>>   | R32, (R32|M32) | R64, (R64|M64) -> emit2 0x8B r
> 
> The patterns are not the same even though the right-hand side is the
> same. When a 64-bit operand is present, the code needs to emit a
> prefix byte in the instruction stream. The macro-expander looks for 64-
> bit specifiers in the left-hand-side of the pattern, and generates the
> encoder appropriately. Obviously, you couldn't have known this without
> the code.

Ok, so you'd probably pass 64 as another argument to "emit". You could write
the ML in the style of the Lisp macro, with more free form descriptions of
the patterns and rules and a custom matcher than understood them. However,
I would stick with leveraging the pattern matcher as much as possible
because it is so efficient.

> Incidentally, you seem to have the general idea correct, and your
> solution would work. However, it's not the solution I wanted to
> express, specifically, your "emit2" function is being customized by
> the expansion commands (right hand side of the pattern) at run-time,
> and what I wanted was a function customized at compile-time. The
> generated machine code will be the same, but the two implementations
> describe different types of assemblers, specifically direct versus
> table-driven.

Yes. I would expect the performance to be quite similar though.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187009120.508645.226160@e9g2000prf.googlegroups.com>
> I would stick with leveraging the pattern matcher as much as possible
> because it is so efficient.

You can make the macro emit arbitrarily efficient code (limited by the
problem, of course). My straightforward expansion is naive, you could
improve it by pulling the case statements to the very top, and
factoring common test prefixes into a decision tree. This is likely
the same code the ML pattern matcher would generate.

> Yes. I would expect the performance to be quite similar though.

Direct assemblers are generally more efficient. Interpreting pattern
tables at run-time involves a lot of data-driven hard-to-predict
branches that can essentially be partially-evaluated out by the macro-
expansion. Also, there is obviously less memory access (no searching
through a pattern table). On the other hand, the code will be a lot
bigger, so you might lose it in the i-cache.

The merits of direct versus table-driven assemblers are a bit
irrelevant here, though. I wanted to build a particular kind for
certain reasons, and Lisp made it easy. Telling me how to build a
different kind in ML doesn't really help me.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c1e5acsenlu9c@corp.supernews.com>
Rayiner Hashem wrote:
>> I would stick with leveraging the pattern matcher as much as possible
>> because it is so efficient.
> 
> You can make the macro emit arbitrarily efficient code (limited by the
> problem, of course). My straightforward expansion is naive, you could
> improve it by pulling the case statements to the very top, and
> factoring common test prefixes into a decision tree. This is likely
> the same code the ML pattern matcher would generate.

That's exactly what Mark Tarver, Andre Thieme, Nathan Froyd, Pascal
Costanza, Dan Bensen and several other people said with regard to the
exact same problem in the symbolic simplifier:

  http://www.lambdassociates.org/studies/study10.htm

That page compares the least optimized OCaml to several optimized Lisp
implementations. As you can see, it is practically impossible to match the
performance of an ML pattern match compiler.

>> Yes. I would expect the performance to be quite similar though.
> 
> Direct assemblers are generally more efficient.

Comparing Lisp vs Lisp, the macro approach should not be slower, yes.

> Interpreting pattern 
> tables at run-time involves a lot of data-driven hard-to-predict
> branches that can essentially be partially-evaluated out by the macro-
> expansion.

Andre Thieme's first Lisp implementation of the symbolic simplifier use the
same style of final Lisp code that you are using (except he wrote it by
hand whereas yours is generated by a macro) and it was two orders of
magnitude slower than the OCaml. Specifically, tests were repeated and not
hoisted and dispatch was done linearly using COND.

> Also, there is obviously less memory access (no searching 
> through a pattern table).

Other way around: your code is doing a linear search and repeating tests
whereas the ML is precomputing the tests and performing dispatch in O(1)
using a statically optimized LUT. Moreover, your repeated tests involve
many more indirections, so it will be doing several times as many
incoherent memory accesses as the ML.

> On the other hand, the code will be a lot bigger, so you might lose it in
> the i-cache. 

Actually, I just did some simple tests and you only need constant
propagation and inlining to recover the macro-expanded code, both of which
are already done by OCaml. So you just need to write one generic curried
function and partially apply your constant arguments to get the partially
specialized implementations that you're after.

> The merits of direct versus table-driven assemblers are a bit
> irrelevant here, though. I wanted to build a particular kind for
> certain reasons, and Lisp made it easy. Telling me how to build a
> different kind in ML doesn't really help me.

I appreciate that you want to expand the code at compile time but I still do
not understand why you want to.

IMHO, the problem you are solving is ideally suited to ML. Your entire
program is largely pattern matching and testing based upon sets of possible
values inferred from the source code (static typing).

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187038990.978087.253390@r34g2000hsd.googlegroups.com>
> That page compares the least optimized OCaml to several optimized Lisp
> implementations. As you can see, it is practically impossible to match the
> performance of an ML pattern match compiler.

There is nothing magical in the ML pattern match compiler. The
algorithm is published, and at the limiting case you're just going to
be back to comparing "which compiler has a better code-generator?" If
your Lisp has a built-in assembler, you can even compile down to that,
blowing away anything else in performance.

Anyway, in this particular program, the pattern matching might be
slower in Lisp, but it has a lot less to match. Consider the "mov"
pattern:

(define-encoder mov (dest source)
  ((rm8 r8) (#x88 /r))
  ((rm32 r32) (#x89 /r))
  ((rm64 r64) (#x89 /r))
  ((r8 rm8) (#x8A /r))
  ((r32 rm32) (#x8B /r))
  ((r64 rm64) (#x8B /r))
  ((r8 imm8) (#xB0 +r))
  ((r32 imm32) (#xB8 +r))
  ((r64 imm64) (#xB8 +r))
  ((rm8 imm8) (#xC6 /0 ib))
  ((rm32 imm32) (#xC7 /0 id))
  ((rm64 imm32) (#xC7 /0 id)))

You've shown how you can implement the selection as a pattern-match,
selecting the appropriate right-hand-side. Say you get something like
(:eax 1000), which selects the pattern:

((rm32 imm32) (#xC7 /0 id))

You expressed this as something like:

...
RM32, IMM32 -> emit #xC7 /0 id
...

What does "emit" look like? It's basically an interpreter for a
command language. It has to loop over the arguments passed to it, and
effect the appropriate actions for each command. In the inner-loop of
that interpreter, you have something like:

integer -> set-opcode field
| /0 -> set subcode field to 0, rm field to dest
| /1 -> set subcode field to 1, rm field to dest
| /2 -> set subcode field to 2, rm field to dest
...
| ib -> set byte immediate field to source
| id -> set dword immediate field to source
| iq -> set quadword immediate field to source
...

The Lisp code bypasses this matching completely. When it sees this
expansion:

(#xC7 /0 id)

It generates code that does exactly what needs to be done:

set opcode #xC7
set subcode 0
set rm to dest
set dword immediate to source

The resulting code is also easier to optimize, presenting more
statically-known constants and eliminating a lot of control-flow.

> Other way around: your code is doing a linear search and repeating tests
> whereas the ML is precomputing the tests and performing dispatch in O(1)
> using a statically optimized LUT. Moreover, your repeated tests involve
> many more indirections, so it will be doing several times as many
> incoherent memory accesses as the ML.

My macro-expansion is crappy :) I said you *could* generate
arbitrarily clever code, not that I did so. As for lookup tables, by
and large you don't want to use them for patterns this small. The
nature of direct and indirect branch prediction make a LUT a bad idea
until you get out to dozens of cases.

> Actually, I just did some simple tests and you only need constant
> propagation and inlining to recover the macro-expanded code, both of which
> are already done by OCaml.

I don't know what simplistic tests you're doing, but the O'Caml
compiler would essentially have to partially evaluate the interpreter
loop shown above, and to be practically comparable, would have to do
so even in the HOF case I showed in a previous post. I doubt it would
be possible even for this simple DSL (whose compiler is basically 150
LOC), and I'm sure it's not possible in the general case.

> IMHO, the problem you are solving is ideally suited to ML. Your entire
> program is largely pattern matching and testing based upon sets of possible
> values inferred from the source code (static typing).

The particular solution I want to express isn't even (easily)
expressible in ML.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c20m1ccbrgee9@corp.supernews.com>
Rayiner Hashem wrote:
>> That page compares the least optimized OCaml to several optimized Lisp
>> implementations. As you can see, it is practically impossible to match
>> the performance of an ML pattern match compiler.
> 
> There is nothing magical in the ML pattern match compiler. The
> algorithm is published, and at the limiting case you're just going to
> be back to comparing "which compiler has a better code-generator?" If
> your Lisp has a built-in assembler, you can even compile down to that,
> blowing away anything else in performance.

Are you really going to reimplement an optimizing pattern match compiler and
a native-code back end? Each would be an order of magnitude more code than
your entire project...

> Anyway, in this particular program, the pattern matching might be
> slower in Lisp, but it has a lot less to match. Consider the "mov"
> pattern:
> 
> (define-encoder mov (dest source)
>   ((rm8 r8) (#x88 /r))
>   ((rm32 r32) (#x89 /r))
>   ((rm64 r64) (#x89 /r))
>   ((r8 rm8) (#x8A /r))
>   ((r32 rm32) (#x8B /r))
>   ((r64 rm64) (#x8B /r))
>   ((r8 imm8) (#xB0 +r))
>   ((r32 imm32) (#xB8 +r))
>   ((r64 imm64) (#xB8 +r))
>   ((rm8 imm8) (#xC6 /0 ib))
>   ((rm32 imm32) (#xC7 /0 id))
>   ((rm64 imm32) (#xC7 /0 id)))
> 
> You've shown how you can implement the selection as a pattern-match,
> selecting the appropriate right-hand-side. Say you get something like
> (:eax 1000), which selects the pattern:
> 
> ((rm32 imm32) (#xC7 /0 id))
> 
> You expressed this as something like:
> 
> ...
> RM32, IMM32 -> emit #xC7 /0 id
> ...
> 
> What does "emit" look like? It's basically an interpreter for a
> command language. It has to loop over the arguments passed to it, and 
> effect the appropriate actions for each command. In the inner-loop of
> that interpreter, you have something like:
> 
> integer -> set-opcode field
> | /0 -> set subcode field to 0, rm field to dest
> | /1 -> set subcode field to 1, rm field to dest
> | /2 -> set subcode field to 2, rm field to dest
> ...
> | ib -> set byte immediate field to source
> | id -> set dword immediate field to source
> | iq -> set quadword immediate field to source
> ...

Right, but when the OCaml compiler comes across an expression in the source
code where some constants are partially applied to the emit function, it
inlines those constants to form a partially specialized "emit" function.

So:

  emit #xC7 /0 id

is statically optimized to:

  set opcode #xC7
  set subcode 0
  set rm to dest
  set dword immediate to source

without benefit of macros.

This only works because you're passing constants. In OCaml, if you pass a
function to a higher-order function then this optimization is not done.

You could probably get the same effect in Lisp by turning your macros into
functions and having them inlined.

> The Lisp code bypasses this matching completely. When it sees this
> expansion:
> 
> (#xC7 /0 id)
> 
> It generates code that does exactly what needs to be done:
> 
> set opcode #xC7
> set subcode 0
> set rm to dest
> set dword immediate to source
> 
> The resulting code is also easier to optimize, presenting more
> statically-known constants and eliminating a lot of control-flow.

I believe partial application gives you partial specialization in this
context, so the overhead is eliminated.

>> Other way around: your code is doing a linear search and repeating tests
>> whereas the ML is precomputing the tests and performing dispatch in O(1)
>> using a statically optimized LUT. Moreover, your repeated tests involve
>> many more indirections, so it will be doing several times as many
>> incoherent memory accesses as the ML.
> 
> My macro-expansion is crappy :) I said you *could* generate
> arbitrarily clever code, not that I did so.

In theory, sure.

> As for lookup tables, by 
> and large you don't want to use them for patterns this small. The
> nature of direct and indirect branch prediction make a LUT a bad idea
> until you get out to dozens of cases.

Then why is the symbolic simplifier much faster in OCaml even though each
match only has under half a dozen cases?

>> Actually, I just did some simple tests and you only need constant
>> propagation and inlining to recover the macro-expanded code, both of
>> which are already done by OCaml.
> 
> I don't know what simplistic tests you're doing, but the O'Caml
> compiler would essentially have to partially evaluate the interpreter
> loop shown above,

Exactly, yes. I believe you only need constant propagation and inlining to
get that in this case, both of which are implemented by OCaml.

> and to be practically comparable, would have to do 
> so even in the HOF case I showed in a previous post.

A function parameterized over another function would not be partially
specialized by OCaml. Some other compilers (e.g. MLton) almost certainly
would though.

> I doubt it would 
> be possible even for this simple DSL (whose compiler is basically 150
> LOC), and I'm sure it's not possible in the general case.

I hadn't thought about whether or not it is possible in the general case but
I'm quite sure it would work here, for two main reasons:

1. You curry the encoder function and perform evaluation at each partial
application.

2. The encoder function only has constant non-function values partially
applied to it.

>> IMHO, the problem you are solving is ideally suited to ML. Your entire
>> program is largely pattern matching and testing based upon sets of
>> possible values inferred from the source code (static typing).
> 
> The particular solution I want to express isn't even (easily)
> expressible in ML.

Ok.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187059739.667594.304860@b79g2000hse.googlegroups.com>
> Are you really going to reimplement an optimizing pattern match compiler and
> a native-code back end? Each would be an order of magnitude more code than
> your entire project...

Who says I need to implement a fully general pattern match compiler?
My patterns are very simple, and my productions are just some byte-
manipulation. Given a suitably integrated LAP, I'd be surprised if it
made the code even a third larger.

> Right, but when the OCaml compiler comes across an expression in the source
> code where some constants are partially applied to the emit function, it
> inlines those constants to form a partially specialized "emit" function.

I'd be impressed to see a compiler heroic enough to try and inline a
150 line function called that often. This is actually what you want in
this case, but it'd be broken as a general heuristic.

> is statically optimized to:
>
>   set opcode #xC7
>   set subcode 0
>   set rm to dest
>   set dword immediate to source
>
> without benefit of macros.

This is doable in theory, but can O'Caml actually do it? If it can,
then kudos to it. However, understand that it won't be possible in the
general case. Basically, you're asking the compiler to derive a
compiler for a language given its interpreter, then use that compiler
to compile some constant source code.

> Then why is the symbolic simplifier much faster in OCaml even though each
> match only has under half a dozen cases?

I'd have to look at the code to see. On modern CPUs jump-tables have a
pretty bad behavior. Indirect jumps are hard to predict, and defeat code
pre-fetching pretty soundly. GCC seems to transition from a series of
cmp/jmp's to a lookup table at around 5 cases, which looks a bit low
to me, but is probably reasonable if you assume a uniform distribution
of key values. If your distribution is somewhat predictable, using cmp/
jmp's becomes a better idea out to many more cases.

> Exactly, yes. I believe you only need constant propagation and inlining to
> get that in this case, both of which are implemented by OCaml.

There are lots of limitations to constant propagation. Ie: very few
optimizers will flow a constant through a data structure or closed-
over variable. Eg:

function make-foo (x)
  return function make-bar (y)
           if(x > 10)
             x = 10;
             return y + x;
           else
             return y;

fun = make-foo(3)

Traditional techniques for compiling functional programs will box "x"
since it's closed over and assigned, preventing the constant from
flowing through "x" and keeping the compiler from eliminating the
conditional. It's things like this that make it impossible for the
compiler to partially-evaluate your interpreter in the general case.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c2fthga7e08f1@corp.supernews.com>
Rayiner Hashem wrote:
> Who says I need to implement a fully general pattern match compiler?
> My patterns are very simple, and my productions are just some byte-
> manipulation. Given a suitably integrated LAP, I'd be surprised if it
> made the code even a third larger.

Let me know when you've finished your decision tree optimizer. :-)

>> is statically optimized to:
>>
>>   set opcode #xC7
>>   set subcode 0
>>   set rm to dest
>>   set dword immediate to source
>>
>> without benefit of macros.
> 
> This is doable in theory, but can O'Caml actually do it? If it can,
> then kudos to it. However, understand that it won't be possible in the
> general case.

Absolutely.

> Basically, you're asking the compiler to derive a 
> compiler for a language given its interpreter, then use that compiler
> to compile some constant source code.

In this case, I'm not sure this warrants being called an interpreter because
it applies a fixed and closed set of rewrite rules. In particular, there
are no looping or higher-order constructs which is precisely why the above
can work.

This is true to a lesser extent of the Minim example because it only has one
value type: int. That changed the way I optimized my code and made it no
longer representative of a real interpreter.

>> Then why is the symbolic simplifier much faster in OCaml even though each
>> match only has under half a dozen cases?
> 
> I'd have to look at the code to see. On modern CPUs jump-tables have a
> pretty behavior. Indirect jumps are hard to predict, and defeat code
> pre-fetching pretty soundly. GCC seems to transition from a series of
> cmp/jmp's to a lookup table at around 5 cases, which looks a bit low
> to me, but is probably reasonable if you assume a uniform distribution
> of key values. If your distribution is somewhat predictable, using cmp/
> jmp's becomes a better idea out to many more cases.

I appreciate there are many theoretical reasons why the OCaml should be slow
but, the fact is, the OCaml is by far the fastest of the interpreters.

>> Exactly, yes. I believe you only need constant propagation and inlining
>> to get that in this case, both of which are implemented by OCaml.
> 
> There are lot's of limitations to constant propagation. Ie: very few
> optimizers will flow a constant through a data structure or closed-
> over variable. Eg:
> 
> function make-foo (x)
>   return function make-bar (y)
>            if(x > 10)
>              x = 10;
>              return y + x;
>            else
>              return y;
> 
> fun = make-foo(3)
> 
> Traditional techniques for compiling functional programs will box "x"
> since it's closed over and assigned, preventing the constant from
> flowing through "x" and keeping the compiler from eliminating the
> conditional. It's things like this that make it impossible for the
> compiler to partially-evaluate your interpreter in the general case.

I think we'll need a real benchmark to see whether or not this works and/or
is worthwhile.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187105324.342827.167250@k79g2000hse.googlegroups.com>
> In this case, I'm not sure this warrants being called an interpreter because
> it applies a fixed and closed set of rewrite rules. In particular, there
> are no looping or higher-order constructs which is precisely why the above
> can work.

I gave some thought to it last night, and it occurs to me that the
precise form of the "emit" function would probably stymie the
optimizer. In Lisp, "emit" would look something like:

(defun emit (operands commands)
  (let ((ins (make-instruction)))
    (iter (for command in (preprocess-command-list commands))
      (cond
         ((integerp command) (place-opcode ins command))
         ((eql command '/r) (set-reg-field ins (get-next-operand
operands)))
         ((eql command '/rm) (set-rm-field ins (get-next-operand
operands)))
         ; more cases))
    (write-instruction-to-vector ins))

I don't think reducing this to a simple sequence is within the
capabilities of an existing optimizer. Having "operands" and
"commands" as lists is going to be enough to defeat most optimizers.
The call to preprocess-command-list will defeat any that are left,
since they won't be able to recognize that given a constant list it
returns a transformed, but still constant list. The mutable state
buried in place-opcode, get-next-operand, and set-*-field will just be
icing on the cake.

Incidentally, I'm not trying to purposefully defeat the optimizer
here, I'm just trying to present a relatively simple, but still
realistic example with all the hair that accompanies realistic
situations.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c4bnld5367rc5@corp.supernews.com>
Rayiner Hashem wrote:
>> In this case, I'm not sure this warrants being called an interpreter
>> because it applies a fixed and closed set of rewrite rules. In
>> particular, there are no looping or higher-order constructs which is
>> precisely why the above can work.
> 
> I gave some thought to it last night, and it occurs to me that the
> precise form of the "emit" function would probably stymie the
> optimizer. In Lisp, "emit" would look something like:
> 
> (defun emit (operands commands)
>   (let ((ins (make-instruction)))
>     (iter (for command in (preprocess-command-list commands))
>       (cond
>          ((integerp command) (place-opcode ins command))
>          ((eql command '/r) (set-reg-field ins (get-next-operand
> operands)))
>          ((eql command '/rm) (set-rm-field ins (get-next-operand
> operands)))
>          ; more cases))
>     (write-instruction-to-vector ins))
> 
> I don't think reducing this to a simple sequence is within the
> capabilities of an existing optimizer. Having "operands" and
> "commands" as lists is going to be enough to defeat most optimizers.
> The call to preprocess-command-list will defeat any that are left,
> since they won't be able to recognize that given a constant list it
> returns a transformed, but still constant list. The mutable state
> buried in place-opcode, get-next-operand, and set-*-field will just be
> icing on the cake.
> 
> Incidentally, I'm not trying to purposefully defeat the optimizer
> here, I'm just trying to present a relatively simple, but still
> realistic example with all the hair that accompanies realistic
> situations.

That implementation is more representative of how an interpreter for a
higher-level target language must work because high-level languages can
have arbitrary numbers of operands and values from a non-trivial type
system. You are quite correct that your code does not convey static
information and will not be optimized as a consequence (it will remain as a
real interpreter).

Perhaps the hardest part of mastering statically-typed functional
programming languages is learning how you can leverage the static type
system to make your code shorter, faster and more robust. This function is
an ideal candidate for writing in a static style, not only in a static
language but also in a dynamic language that tries to perform static
optimizations, like SBCL-compiled Lisp.

Out of interest, here is the equivalent OCaml:

let emit operands commands =
  let ins = make_instruction() in
  List.iter
    (function
     | `Op command -> place_opcode ins command
     | `R -> set_reg_field ins (get_next_operand operands)
     | `RM -> set_rm_field ins (get_next_operand operands)
     ...)
    (List.map preprocess_command commands);
  write_instruction_to_vector ins

Note that the commands are explicitly boxed in OCaml. This is a reflection
of a needlessly dynamic implementation in this case.

Function composition is the obvious solution in this case. Consider a call
to the "emit" function written in your style:

  emit [`f; `g] [`Op 0x80; `R; `RM]

As you say, this will work. But what about this representation:

  emit (op 0x80 => r `f => rm `g)

where f => g denotes a form of function composition that applies f first and
then g whilst accumulating a result, something like:

# let ( => ) f g x = g(f x);;
val ( => ) : ('a -> 'b) -> ('b -> 'c) -> 'a -> 'c = <fun>

The same sequence of operations is conveyed but the compiler knows what
operations are called with what constant arguments and in what order.

What do the combinators look like? Actually, you already wrote them as
place_opcode, set_reg_field and set_rm_field. The only difference is that
they would need to accept their command/operand first and then the
instruction and a continuation.

What does "emit" look like? A lot simpler:

  let emit k = write_instruction_to_vector (k (make_instruction()))

You also had a global command preprocessing function in yours but I'm sure
you can just put that inside "op", "r", "rm" etc.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187140578.688942.289380@l22g2000prc.googlegroups.com>
> Function composition is the obvious solution in this case. Consider a call
> to the "emit" function written in your style:
>
>   emit [`f; `g] [`Op 0x80; `R; `RM]
>
> As you say, this will work. But what about this representation:
>
>   emit (op 0x80 => r `f => rm `g)
>
> where f => g denotes a form of function composition that applies f first and
> then g whilst accumulating a result, something like:
>
> # let ( => ) f g x = g(f x);;
> val ( => ) : ('a -> 'b) -> ('b -> 'c) -> 'a -> 'c = <fun>
>
> The same sequence of operations is conveyed but the compiler knows what
> operations are called with what constant arguments and in what order.
>
> What do the combinators look like? Actually, you already wrote them as
> place_opcode, set_reg_field and set_rm_field. The only difference is that
> they would need to accept their command/operand first and then the
> instruction and a continuation.

This is a clever solution. The notation is less compact than mine, but
just by a bit. But is it fast? You've turned simple intra-procedural
control flow into a lot of inter-procedural control flow, and you're
counting on the compiler to put it all back together. Maybe it does
--- that would be quite impressive.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9vctc$bpf$2@online.de>
Rayiner Hashem schrieb:
> But is it fast? You've turned simple intra-procedural
> control flow into a lot of inter-procedural control flow, and you're
> counting on the compiler to put it all back together. Maybe it does
> --- that would be quite impressive.

I would expect it to - in a language that doesn't have aliasing, this 
kind of optimization is the norm (and not very difficult to do anyway).

It would also be somewhat difficult in an environment where functions 
are loaded incrementally. (One of the reasons why FPLs traditionally use 
compilers, and FPL interpreters traditionally don't optimize well.)

Regards,
Jo
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187201579.208333.183610@a39g2000hsc.googlegroups.com>
> > But is it fast? You've turned simple intra-procedural
> > control flow into a lot of inter-procedural control flow, and you're
> > counting on the compiler to put it all back together. Maybe it does
> > --- that would be quite impressive.
>
> I would expect it to - in a language that doesn't have aliasing, this
> kind of optimization is the norm (and not very difficult to do anyway).

O'Caml has side-effects and mutable cells, so it is no easier to
optimize in this regard than Lisp. I'm not sure what heuristic
approaches are used for this problem in production compilers, but
basically the problem is just as hard as type analysis in dynamic
languages. I'm not sure if there is some sort of theoretical
equivalence, but you face the same problems, and the same algorithms
are applicable to both.
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187194293.285864.172800@m37g2000prh.googlegroups.com>
On Aug 13, 6:09 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> Are you really going to reimplement an optimizing pattern match compiler and

Of course not, because Harrop's own numbers show that isn't worth
doing.

Harrop has repeatedly told us that OCaml programs only spend 2-3% of
their time in its pattern matcher.  As a result, using a pattern
matcher that is 10x slower will add less than 20% to total run-time.

Harrop's numbers do suggest one question - what are they doing the
rest of the time?  The programs that he shows are almost completely
pattern matching.  Are they unrepresentative or is there some huge
overhead that he's not telling us about?
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c6fnooch24m87@corp.supernews.com>
Andy Freeman wrote:
> Harrop has repeatedly told us that OCaml programs only spend 2-3% of
> their time in its pattern matcher.

When?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187194715.364870.270070@e9g2000prf.googlegroups.com>
On Aug 13, 6:09 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> This only works because you're passing constants. In OCaml, if you pass a
> function to a higher-order function then this optimization is not done.
>
> You could probably get the same effect in Lisp by turning your macros into
> functions and having them inlined.

That sentence demonstrates that Harrop doesn't understand macros.

In this case, the macro generates code that contains those constants.
The compiler is then free to do constant folding.

One can imagine a compiler that only folds constants when inlining a
function, but that's unlikely.

In other words, inlining isn't necessary.  In fact, macros can be used
to get much of the benefit of inlining from a compiler that doesn't
support it.

In other words, macros can be used to get around implementation issues
in addition to syntax issues.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c6fpo10alf789@corp.supernews.com>
Andy Freeman wrote:
> In other words, inlining isn't necessary.  In fact, macros can be used
> to get much of the benefit of inlining from a compiler that doesn't
> support it.

Now you're Greenspunning compiler optimizations as well...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-056746.21402315082007@news-europe.giganews.com>
In article <···············@corp.supernews.com>,
 Jon Harrop <···@ffconsultancy.com> wrote:

> Andy Freeman wrote:
> > In other words, inlining isn't necessary.  In fact, macros can be used
> > to get much of the benefit of inlining from a compiler that doesn't
> > support it.
> 
> Now you're Greenspunning compiler optimizations as well...

You should check out the definition of 'Greenspun's tenth rule'
some time before you use it again.
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187276114.127382.87700@z24g2000prh.googlegroups.com>
On Aug 15, 10:49 am, Jon Harrop <····@ffconsultancy.com> wrote:
> Andy Freeman wrote:
> > In other words, inlining isn't necessary.  In fact, macros can be used
> > to get much of the benefit of inlining from a compiler that doesn't
> > support it.
>
> Now you're Greenspunning compiler optimizations as well...

Not at all.  I'm pointing out that lisp macros let one get around
certain limitations in certain compiler implementations.

Unlike Harrop's hobby horse, many lisp dialects have multiple
implementations.  One may choose a given implementation to take
advantage of certain properties, but wish to overcome some of its
limitations.  Being able to do so is a good thing.
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187276672.704366.235140@i13g2000prf.googlegroups.com>
On Aug 15, 10:49 am, Jon Harrop <····@ffconsultancy.com> wrote:
> Now you're Greenspunning compiler optimizations as well...

While Harrop may need an argument, he's consistently misusing
"greenspun".

Greenspun's law is that every large program contains an incomplete/bug-
ridden implementation of a significant part of common lisp.

While every OCaml program, or every program that Harrop writes, may be
dominated by pattern matching, that's simply not true of every lisp
program, not to mention every large program.  It's not even true of a
significant minority.

Yes, one can use pattern matching to do anything, but one can also use
linear algebra to do anything.  I note that OCaml requires libraries
or roll-your-own to do everything with linear algebra.  Perhaps Harrop
will go tell the Apl folks why that's acceptable.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c91dq56e9a29@corp.supernews.com>
Andy Freeman wrote:
> Greenspun's law is that every large program contains an incomplete/bug-
> ridden implementation of a significant part of common lisp.

More recently:

  Every large Lisp program contains an incomplete/bug-ridden implementation
of a significant part of OCaml/Haskell.

Look at Rayiner's pattern matcher and static type checker, for example.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187291689.998580.198540@50g2000hsm.googlegroups.com>
> Look at Rayiner's pattern matcher and static type checker, for example.

Looks like you still haven't figured out what the code is doing :)
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187292239.809834.40430@a39g2000hsc.googlegroups.com>
>   Every large Lisp program contains an incomplete/bug-ridden implementation
> of a significant part of OCaml/Haskell.
>
> Look at Rayiner's pattern matcher and static type checker, for example.
>

If some trivial pattern-matching* is "a significant part of OCaml",
then what the heck is all the rest of the code in the source tree for?

*) For those in the audience --- the functions in the macro that
generate the "matching" is 8 lines of code, one of them commented-out,
and one function for classifying operands called at runtime which
would still be necessary in the O'Caml code. There is other "matching"
in the code, but those are trivial applications of COND or ECASE, both
of which are built-in.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13capqp2o9kftd8@corp.supernews.com>
Rayiner Hashem wrote:
> *) For those in the audience --- the functions in the macro that
> generate the "matching" is 8 lines of code, one of them commented-out,
> and one function for classifying operands called at runtime which
> would still be necessary in the O'Caml code. There is other "matching"
> in the code, but those are trivial applications of COND or ECASE, both
> of which are built-in.

$ wc *.lisp
  207   564  5448 assembler.lisp
 1168  4281 35259 encoders.lisp
    9    25   293 package.lisp
  333  1132  8992 testsuite.lisp
   47   141  1230 utilities.lisp
 1764  6143 51222 total
$ grep cond * | wc
     31     145    1491
$ grep ecase * | wc
      8      31     313
$ grep "define-encoder" * | wc
     53     230    2686

Excluding nested "if"s and "when"s and membership tests (e.g. on
defparameter) that is still a pattern match every 19 lines of code. This is
not representative of general code because your application is a rewriter.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187356316.021312.61020@22g2000hsm.googlegroups.com>
> Excluding nested "if"s and "when"s and membership tests (e.g. on
> defparameter) that is still a pattern match every 19 lines of code. This is
> not representation of general code because your application is a rewriter.

Your claim was:

Every large Lisp program contains an incomplete/bug-ridden
_implementation_
of a significant part of OCaml/Haskell.

(emphasis mine)

The only code in there _implementing_ pattern matching is 8 lines in
the define-encoder macro. ECASE and COND are built-in language
features. The rest are uses of matching, which all would've been
required in an O'Caml version.

Incidentally, your numbers are probably conservative by a factor of
two. All the define-*-encoder calls expand to pattern matches, and
there are probably close to 100 of those.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cc662pdd6la71@corp.supernews.com>
Rayiner Hashem wrote:
> Your claim was:
> 
> Every large Lisp program contains an incomplete/bug-ridden
> _implementation_
> of a significant part of OCaml/Haskell.
> 
> (emphasis mine)
> 
> The only code in there _implementing_ pattern matching is 8 lines in
> the define-encoder macro.

Hence the "incomplete". Your claim was for a high-performance assembler.
Let's see how long your pattern matcher gets when you optimize it...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <8g3aynp4fw.fsf@hod.lan.m-e-leypold.de>
Jon Harrop wrote:

> I appreciate that you want to expand the code at compile time but I still do
> not understand why you want to.

I think this is basically a question of philosophy. Some people want
to embed complete DSLs in their language. Instead of putting DSLs in,
say separate files and compiling them to modules (as I would do in
the extreme case if needed in OCaml) they want to write

 (FOO
    ...)

so that everything at '...' can have a meaning and a syntax not the
least reminiscent of the language surrounding this block. If you want
something like this, macros are unbeatable. Personally I think there
is a number of arguments to use this means of expression sparingly
(like that the semantics changing totally within FOO effectively means
that you need to read all the macro docs to even understand what the
constructs are and what is just syntax etc). 

The traditional philosophy I'd prefer is more like lex+yacc: If one
needs a DSL one puts it into a separate file and compiles it to a
module in the target language. IMHO that furthers keeping this kind
of trick contained and stops it from propagating over the whole program
(and if you have the latter, you effectively have a different language
and that is bad news for maintenance).

Regards -- Markus
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186978442.099454.13300@i13g2000prf.googlegroups.com>
Btw, I feel kinda bad for throwing you code in a syntax that I didn't
specify upfront, without code to boot. The code is available here:
http://www.prism.gatech.edu/~gtg990h/amd64-asm.tar.gz

The readme describes the syntax of the assembly source, and there is a
comment in encoders.lisp (around line 278) describing the syntax of
the instruction pattern language. If you have a Lisp compiler, you can
use the macro-expander to get an idea of what how the macros work.
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5i4acvF3musvmU1@mid.individual.net>
Joachim Durchholz wrote:
> Rainer Joswig schrieb:
>> In article <············@online.de>,
>>  Joachim Durchholz <··@durchholz.org> wrote:
>>
>>> Because it's not syntax that's the difference. You don't need macros 
>>> if you don't need to control when an expression is evaluated.
>>>
>>> Syntactically, both macros and function calls are just S-expressions 
>>> in Lisp. It's the semantics that differs, the time at which the 
>>> expressions are evaluated.
>>> You don't need to make that difference if the semantics of an 
>>> expression is independent of when it is evaluated.
>>
>> So, macros are not about different evaluation time. Macros
>> are source transforming functions. They can generate
>> arbitrary source. The result of the source transformation
>> (new source) runs at runtime, just as the function would.
>> So the macro expansion is an additional step.
> 
> Yes, I know that, but what does it buy me?
> 
> Let me try a concrete example, straight from Practical Common Lisp by 
> Peter Seibel that somebody was kind enough to direct my attention to.
> 
> WHEN, UNLESS, DOLIST and DOTIMES could all be easily written as 
> higher-order functions. They'd take functions as parameters for the 
> various conditions and bodies, and return a function that, when run, 
> executes the constructed conditional resp. loop.

This is wrong. WHEN, UNLESS, DOLIST and DOTIMES cannot (!) be 
implemented as higher-order functions. They don't take functions as 
parameters, they take code fragments as parameters.

> To make that smooth, you'd need as little syntactic overhead as 
> possible. Haskell's syntax is minimal enough for that IMHO.

Macros are not about making syntax smooth, they are about hiding 
implementation details. If you implement the functional equivalents of 
WHEN, UNLESS, DOLIST and DOTIMES, you have to impose the requirement on 
the call sites that they need to wrap code fragments in closures such 
that their evaluation is correctly delayed and can be controlled by 
those functions. This need to wrap code fragments in closures is only 
there for the functionality that _should_ actually be the job of the 
called functions. So higher-order functions actually clutter client code 
with otherwise (!) uninteresting implementation details.

What is good about syntactic abstractions, as provided by macros, is 
that you can change the implementation underneath without changing the 
call sites. For example, you could expand into goto statements instead 
of higher-order functions to control evaluation of code fragments by 
jumping around them, to avoid the overhead of creating closures 
completely. Such a change of implementation strategy is not possible 
with functional abstractions because you are essentially stuck with 
higher-order functions and closures, and don't have anything else at 
your disposal.

> I'm not sure how much of that argument transfers to more complicated 
> macros.

Delaying evaluation of parts of a macro invocation is only one of the 
many possible uses for macros. With macros, you have complete access to 
the whole macro invocation and can fully deconstruct all the code 
fragments in whatever way you like. Closures, functions, and function 
arguments in lazy languages, are in contrast typically opaque: You 
cannot modify them, and you typically cannot even inspect them. So with 
macros, you have a clear increase in expressive power.

This is what makes macros especially suitable for embedding 
domain-specific languages: You are not restricted by the preferred 
programming style of the host language at all. In a pure functional 
language, your domain-specific language will essentially always remain a 
functional language. In a pure object-oriented language, your 
domain-specific language will essentially always remain an 
object-oriented language. And so on. As soon as you have macros, you can 
always break out of whatever the underlying language favors.

Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-B169BA.01315711082007@news-europe.giganews.com>
In article <···············@mid.individual.net>,
 Pascal Costanza <··@p-cos.net> wrote:

> Joachim Durchholz wrote:
> > Rainer Joswig schrieb:
> >> In article <············@online.de>,
> >>  Joachim Durchholz <··@durchholz.org> wrote:
> >>
> >>> Because it's not syntax that's the difference. You don't need macros 
> >>> if you don't need to control when an expression is evaluated.
> >>>
> >>> Syntactically, both macros and function calls are just S-expressions 
> >>> in Lisp. It's the semantics that differs, the time at which the 
> >>> expressions are evaluated.
> >>> You don't need to make that difference if the semantics of an 
> >>> expression is independent of when it is evaluated.
> >>
> >> So, macros are not about different evaluation time. Macros
> >> are source transforming functions. They can generate
> >> arbitrary source. The result of the source transformation
> >> (new source) runs at runtime, just as the function would.
> >> So the macro expansion is an additional step.
> > 
> > Yes, I know that, but what does it buy me?
> > 
> > Let me try a concrete example, straight from Practical Common Lisp by 
> > Peter Seibel that somebody was kind enough to direct my attention to.
> > 
> > WHEN, UNLESS, DOLIST and DOTIMES could all be easily written as 
> > higher-order functions. They'd take functions as parameters for the 
> > various conditions and bodies, and return a function that, when run, 
> > executes the constructed conditional resp. loop.
> 
> This is wrong. WHEN, UNLESS, DOLIST and DOTIMES cannot (!) be 
> implemented as higher-order functions. They don't take functions as 
> parameters, they take code fragments as parameters.

For WHEN you would write

(defun my-when (condition function)
   (if condition
       (funcall function)))

code would be

(my-when (> foo bar)
   (lambda ()
      (baz)))

CL evaluation is strict. Like SML.

Haskell's evaluation is lazy. Evaluation is done when
needed.

-- 
http://lispm.dyndns.org
From: Duane Rettig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <o0absyq478.fsf@gemini.franz.com>
Rainer Joswig <······@lisp.de> writes:

>> This is wrong. WHEN, UNLESS, DOLIST and DOTIMES cannot (!) be 
>> implemented as higher-order functions. They don't take functions as 
>> parameters, they take code fragments as parameters.
>
> For WHEN you would write
>
> (defun my-when (condition function)
>    (if condition
>        (funcall function)))
>
> code would be
>
> (my-when (> foo bar)
>    (lambda ()
>       (baz)))
>
> CL evaluation is strict. Like SML.
>
> Haskell's evaluation is lazy. Evaluation is done when
> needed.

How would you implement OR?

Cleanly?

-- 
Duane Rettig    ·····@franz.com    Franz Inc.  http://www.franz.com/
555 12th St., Suite 1450               http://www.555citycenter.com/
Oakland, Ca. 94607        Phone: (510) 452-2000; Fax: (510) 452-0182   
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9j7nu$ahq$1@registered.motzarella.org>
Duane Rettig schrieb:

>> Haskell's evaluation is lazy. Evaluation is done when
>> needed.
> 
> How would you implement OR?
> 
> Cleanly?

I don't think this is heavily clean, but maybe half acceptable:

(defun ohr (&rest exps)
   (let ((result (eval (funcall (first exps)))))
     (if result
         result
         (when (rest exps)
           (apply #'ohr (rest exps))))))

This is not very tested and I guess we might come up fast with
cases it doesn't handle.


André
-- 
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187021627.385428.231160@z24g2000prh.googlegroups.com>
On Aug 10, 7:43 pm, André Thieme <address.good.until.
···········@justmail.de> wrote:
> Duane Rettig schrieb:
> > How would you implement OR?
> > Cleanly?
>
> I don't think this is heavily clean, but maybe half acceptable:
>
> (defun ohr (&rest exps)
>    (let ((result (eval (funcall (first exps)))))
>      (if result
>          result
>          (when (rest exps)
>            (apply #'ohr (rest exps))))))

That looks like it's evaluating its arguments multiple times in
various ways.  If the arguments are being passed lazily, you need some
way to cause the first argument to be evaluated once.  If they're not,
it's just wrong because the key part of "OR" is that the value of one
argument may stop a subsequent argument from being evaluated at all.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-21CE66.06201611082007@news-europe.giganews.com>
In article <··············@gemini.franz.com>,
 Duane Rettig <·····@franz.com> wrote:

> Rainer Joswig <······@lisp.de> writes:
> 
> >> This is wrong. WHEN, UNLESS, DOLIST and DOTIMES cannot (!) be 
> >> implemented as higher-order functions. They don't take functions as 
> >> parameters, they take code fragments as parameters.
> >
> > For WHEN you would write
> >
> > (defun my-when (condition function)
> >    (if condition
> >        (funcall function)))
> >
> > code would be
> >
> > (my-when (> foo bar)
> >    (lambda ()
> >       (baz)))
> >
> > CL evaluation is strict. Like SML.
> >
> > Haskell's evaluation is lazy. Evaluation is done when
> > needed.
> 
> How would you implement OR?

CL:OR?

> 
> Cleanly?

What is 'cleanly'?
From: Duane Rettig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <o0d4xuoydh.fsf@gemini.franz.com>
Rainer Joswig <······@lisp.de> writes:

> In article <··············@gemini.franz.com>,
>  Duane Rettig <·····@franz.com> wrote:
>
>> Rainer Joswig <······@lisp.de> writes:
>> 
>> >> This is wrong. WHEN, UNLESS, DOLIST and DOTIMES cannot (!) be 
>> >> implemented as higher-order functions. They don't take functions as 
>> >> parameters, they take code fragments as parameters.
>> >
>> > For WHEN you would write
>> >
>> > (defun my-when (condition function)
>> >    (if condition
>> >        (funcall function)))
>> >
>> > code would be
>> >
>> > (my-when (> foo bar)
>> >    (lambda ()
>> >       (baz)))
>> >
>> > CL evaluation is strict. Like SML.
>> >
>> > Haskell's evaluation is lazy. Evaluation is done when
>> > needed.
>> 
>> How would you implement OR?
>
> CL:OR?

Yes.  Andre already presented one.

>> Cleanly?
>
> What is 'cleanly'?

Well, I misunderstood the problem being solved; actually it is a
tradeoff, already discussed before; a HOF version of when or or cannot
accept code fragments, and thus can never be exactly the same as their
CL counterparts; they must accept functions.  Would this be a good
thing?  I think for functional programming, yes, but it certainly would
make the language a lot uglier - note that we've been concentrating on
the implementations of the HOFs, and not the calls to them.

-- 
Duane Rettig    ·····@franz.com    Franz Inc.  http://www.franz.com/
555 12th St., Suite 1450               http://www.555citycenter.com/
Oakland, Ca. 94607        Phone: (510) 452-2000; Fax: (510) 452-0182   
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <nnir7m7y7q.fsf@hod.lan.m-e-leypold.de>
Duane Rettig wrote:

> Rainer Joswig <······@lisp.de> writes:
>
>> In article <··············@gemini.franz.com>,
>>  Duane Rettig <·····@franz.com> wrote:
>>
>>> Rainer Joswig <······@lisp.de> writes:
>>> 
>>> >> This is wrong. WHEN, UNLESS, DOLIST and DOTIMES cannot (!) be 
>>> >> implemented as higher-order functions. They don't take functions as 
>>> >> parameters, they take code fragments as parameters.
>>> >
>>> > For WHEN you would write
>>> >
>>> > (defun my-when (condition function)
>>> >    (if condition
>>> >        (funcall function)))
>>> >
>>> > code would be
>>> >
>>> > (my-when (> foo bar)
>>> >    (lambda ()
>>> >       (baz)))
>>> >
>>> > CL evaluation is strict. Like SML.
>>> >
>>> > Haskell's evaluation is lazy. Evaluation is done when
>>> > needed.
>>> 
>>> How would you implement OR?
>>
>> CL:OR?
>
> Yes.  Andre already presented one.
>
>>> Cleanly?
>>
>> What is 'cleanly'?
>
> Well, I misunderstood the problem being solved; actually it is a
> tradeoff, already discussed before; a HOF version of when or or cannot
> accept code fragments, and thus can never be exactly the same as their
> CL counterparts; they must accept functions.  

From a practical point of view, the difference I see, is that when
using HOFs the meaning of all identifiers is already fixed by the
language itself. Macros instead can introduce new meanings for keywords
in their arguments.

Like:

  (CLASS ...

     ( ... self ... ))

has a special meaning here (and is not bound to anything found outside
the class block)

This is something not possible with HOFs (though one could require
that self is passed to a function (fun self -> ...)).

> Would this be a good thing?  

Depends. Jos' observation that macros are not much missed in languages
like Haskell or OCaml still stands. My impression is that macros just
have their strength in defining new control structures in a broader
sense (that is, constructs that control the execution of program parts
or even create completely new constructs that cannot be easily mapped
to one closure). But it seems that there is only a limited number of
useful constructs of that kind (classes and objects come to mind, as do
advanced loops or unwind-protect). But in FPs iteration is often
better controlled by folding over something, so the advantage of defining
different loops is lost, and classes are often supported natively. In the
end that might explain why they aren't missed so much (despite the
fact that they might be useful: I'm thinking about R. Warnock's
contribution to this thread).


Regards -- Markus
From: Thomas F. Burdick
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186992806.583707.30300@q75g2000hsh.googlegroups.com>
On Aug 11, 5:59 pm, Duane Rettig <·····@franz.com> wrote:
> Rainer Joswig <······@lisp.de> writes:
> > In article <··············@gemini.franz.com>,
> >  Duane Rettig <·····@franz.com> wrote:
>
> >> Rainer Joswig <······@lisp.de> writes:
>
> >> >> This is wrong. WHEN, UNLESS, DOLIST and DOTIMES cannot (!) be
> >> >> implemented as higher-order functions. They don't take functions as
> >> >> parameters, they take code fragments as parameters.
>
> >> > For WHEN you would write
>
> >> > (defun my-when (condition function)
> >> >    (if condition
> >> >        (funcall function)))
>
> >> > code would be
>
> >> > (my-when (> foo bar)
> >> >    (lambda ()
> >> >       (baz)))
>
> >> > CL evaluation is strict. Like SML.
>
> >> > Haskell's evaluation is lazy. Evaluation is done when
> >> > needed.
>
> >> How would you implement OR?
>
> > CL:OR?
>
> Yes.  Andre already presented one.
>
> >> Cleanly?
>
> > What is 'cleanly'?
>
> Well, I misunderstood the problem being solved; actually it is a
> tradeoff, already discussed before; a HOF version of when or or cannot
> accept code fragments, and thus can never be exactly the same as their
> CL counterparts; they must accept functions.  Would this be a good
> thing?  I think for functional programming, yes, but it cetainly would
> make the language a lot uglier - note that we've been concentrating on
> the implementations of the HOFs, and not the calls to them.

A lot uglier?  A little uglier, but if the language was designed from
the ground up with the assumption that the user will want trivial
access to lambda, I don't think much uglier.  I'm thinking of
Smalltalk here, which has a very functional approach in a lot of
things.  I don't think in the following:

  foo > bar
    ifTrue: [self baz]

that the code block is particularly ugly.  There's always the
possibility of course that the pretty macalicious Ambrai environment
is eating my brain...
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bqpn688pibp44@corp.supernews.com>
Duane Rettig wrote:
> How would you implement OR?

An n-ary "or" in a lazy language:

  my_or [] = false
  my_or (true::_) = true
  my_or (false::t) = my_or t

in a strict language:

  let rec my_or = function
    | [] -> false
    | (f, x)::t -> if f x then true else my_or t

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: =?ISO-8859-15?Q?Andr=E9_Thieme?=
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9j59c$3ki$1@registered.motzarella.org>
Rainer Joswig schrieb:

> For WHEN you would write
> 
> (defun my-when (condition function)
>    (if condition
>        (funcall function)))
> 
> code would be
> 
> (my-when (> foo bar)
>    (lambda ()
>       (baz)))

This is what most macros do: hide one lambda (or some more).


> CL evaluation is strict. Like SML.
> 
> Haskell's evaluation is lazy. Evaluation is done when
> needed.

And the nice part about this is that we would call it ca. like this:

(my-when (> foo bar)
   (baz))


André
-- 
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-804361.06135611082007@news-europe.giganews.com>
In article <············@registered.motzarella.org>,
 André Thieme <······························@justmail.de> wrote:

> Rainer Joswig schrieb:
> 
> > For WHEN you would write
> > 
> > (defun my-when (condition function)
> >    (if condition
> >        (funcall function)))
> > 
> > code would be
> > 
> > (my-when (> foo bar)
> >    (lambda ()
> >       (baz)))
> 
> This is what most macros do: hide one lambda (or some more).

Above sentence makes no sense.
 
> 
> > CL evaluation is strict. Like SML.
> > 
> > Haskell's evaluation is lazy. Evaluation is done when
> > needed.
> 
> And the nice part about this is that we would call it ca. like this:
> 
> (my-when (> foo bar)
>    (baz))
> 
> 
> André
> --
From: =?ISO-8859-15?Q?Andr=E9_Thieme?=
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9o2vt$f1t$1@registered.motzarella.org>
Rainer Joswig schrieb:
> In article <············@registered.motzarella.org>,
>  André Thieme <······························@justmail.de> wrote:
> 
>> Rainer Joswig schrieb:
>>
>>> For WHEN you would write
>>>
>>> (defun my-when (condition function)
>>>    (if condition
>>>        (funcall function)))
>>>
>>> code would be
>>>
>>> (my-when (> foo bar)
>>>    (lambda ()
>>>       (baz)))
>> This is what most macros do: hide one lambda (or some more).
> 
> Above sentence makes no sense.

You want to explain why?
And besides of that, would you disagree?

I think it makes sense to mention that from the view of abstraction
macros are doing really most of the time that - saving the programmer
to use lambdas.
At least this is the case for when, unless, case macros, most (if not
all) with- macros and several more.


André
-- 
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-6C456F.01315713082007@news-europe.giganews.com>
In article <············@registered.motzarella.org>,
 André Thieme <······························@justmail.de> wrote:

> Rainer Joswig schrieb:
> > In article <············@registered.motzarella.org>,
> >  André Thieme <······························@justmail.de> wrote:
> > 
> >> Rainer Joswig schrieb:
> >>
> >>> For WHEN you would write
> >>>
> >>> (defun my-when (condition function)
> >>>    (if condition
> >>>        (funcall function)))
> >>>
> >>> code would be
> >>>
> >>> (my-when (> foo bar)
> >>>    (lambda ()
> >>>       (baz)))
> >> This is what most macros do: hide one lambda (or some more).
> > 
> > Above sentence makes no sense.
> 
> You want to explain why?
> And besides of that, would you disagree?
> 
> I think it makes sense to mention that from the view of abstraction
> macros are doing really most of the time that - saving the programmer
> to use lambdas.
> At least this is the case for when, unless, case macros, most (if not
> all) with- macros and several more.

No, in Common Lisp these don't expand into functional forms.
They expand into simpler imperative forms. For conditionals
the basic form is IF (a so-called special form).

No need for lambdas.

There are also lots of macros not falling into this
scheme: LOOP, DEFVAR, DEFCLASS, INCF, ...

> 
> 
> André
> --

-- 
http://lispm.dyndns.org
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bvg4oh9m9lod1@corp.supernews.com>
Rainer Joswig wrote:
> No, in Common Lisp these don't expand into functional forms.
> They expand into simpler imperative forms. For conditionals
> the basic form is IF (a so-called special form).
> 
> No need for lambdas.

I believe that is exactly what Andre was saying: without macros you would
use higher-order functions and pass functions to them. In the special case
of an eager language and a lazy subexpression with no free variables, you
would have the syntactic overhead of a lambda.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5i57bmF3mrshaU1@mid.individual.net>
Rainer Joswig wrote:
> In article <···············@mid.individual.net>,
>  Pascal Costanza <··@p-cos.net> wrote:
> 
>> Joachim Durchholz wrote:
>>> Rainer Joswig schrieb:
>>>> In article <············@online.de>,
>>>>  Joachim Durchholz <··@durchholz.org> wrote:
>>>>
>>>>> Because it's not syntax that's the difference. You don't need macros 
>>>>> if you don't need to control when an expression is evaluated.
>>>>>
>>>>> Syntactically, both macros and function calls are just S-expressions 
>>>>> in Lisp. It's the semantics that differs, the time at which the 
>>>>> expressions are evaluated.
>>>>> You don't need to make that difference if the semantics of an 
>>>>> expression is independent of when it is evaluated.
>>>> So, macros are not about different evaluation time. Macros
>>>> are source transforming functions. They can generate
>>>> arbitrary source. The result of the source transformation
>>>> (new source) runs at runtime, just as the function would.
>>>> So the macro expansion is an additional step.
>>> Yes, I know that, but what does it buy me?
>>>
>>> Let me try a concrete example, straight from Practical Common Lisp by 
>>> Peter Seibel that somebody was kind enough to direct my attention to.
>>>
>>> WHEN, UNLESS, DOLIST and DOTIMES could all be easily written as 
>>> higher-order functions. They'd take functions as parameters for the 
>>> various conditions and bodies, and return a function that, when run, 
>>> executes the constructed conditional resp. loop.
>> This is wrong. WHEN, UNLESS, DOLIST and DOTIMES cannot (!) be 
>> implemented as higher-order functions. They don't take functions as 
>> parameters, they take code fragments as parameters.
> 
> For WHEN you would write
> 
> (defun my-when (condition function)
>    (if condition
>        (funcall function)))
> 
> code would be
> 
> (my-when (> foo bar)
>    (lambda ()
>       (baz)))

Yes, but you cannot implement a function that does "the right thing" for 
the following form (in a strict language):

(my-when (> foo bar)
   (baz))


"Abstraction" means that I can keep an interface while changing the 
implementation behind the scenes, without affecting client code. In 
order to make the my-when abstraction work without macros, you have to 
change the interface and require clients to pass parts of the invocation 
wrapped in lambdas. Hence, it's not really an abstraction. Or to be more 
precise: Some aspects of the implementation leak through, they cannot be 
abstracted away without macros.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x1weamqeb.fsf@ruckus.brouhaha.com>
Pascal Costanza <··@p-cos.net> writes:
> Yes, but you cannot implement a function that does "the right thing"
> for the following form (in a strict language):
> 
> (my-when (> foo bar)
>    (baz))
> 
> "Abstraction" means that I can keep an interface while changing the
> implementation behind the scenes, without affecting client code. 

But macros also break abstraction.  As (iirc) you or Rainer already
mentioned, you cannot pass a macro to a HOF.  Although, I don't know
if that's just a CL limitation.  At least in the cheesy dynamically
scoped Lisps I've used, you could do it (macros were simply a special
lambda that got evaluated twice, the first time with unevaluated args).

It's all a huge mess though.  I have the impression Scheme's hygienic
macros are much cleaner than what CL's do.  I've never used them though.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-01D2F2.10493411082007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Pascal Costanza <··@p-cos.net> writes:
> > Yes, but you cannot implement a function that does "the right thing"
> > for the following form (in a strict language):
> > 
> > (my-when (> foo bar)
> >    (baz))
> > 
> > "Abstraction" means that I can keep an interface while changing the
> > implementation behind the scenes, without affecting client code. 
> 
> But macros also break abstraction.  As (iirc) you or Rainer already
> mentioned, you cannot pass a macro to a HOF.  Although, I don't know
> if that's just a CL limitation.  At least in the cheesy dynamically
> scoped Lisps I've used, you could do it (macros were simply a special
> lambda that got evaluated twice, the first time with unevaluated args).
> 
> It's all a huge mess though.  I have the impression Scheme's hygenic
> macros are much cleaner than what CL's do.  I've never used them though.

Schemer: "Buddha is small, clean, and serious."
Lispnik: "Buddha is big, has hairy armpits, and laughs."

-- 
http://lispm.dyndns.org
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5i5f71F3mq9onU1@mid.individual.net>
Paul Rubin wrote:
> Pascal Costanza <··@p-cos.net> writes:
>> Yes, but you cannot implement a function that does "the right thing"
>> for the following form (in a strict language):
>>
>> (my-when (> foo bar)
>>    (baz))
>>
>> "Abstraction" means that I can keep an interface while changing the
>> implementation behind the scenes, without affecting client code. 
> 
> But macros also break abstraction.  As (iirc) you or Rainer already
> mentioned, you cannot pass a macro to a HOF.

You're right, this is definitely a limitation of macros.

> Although, I don't know if that's just a CL limitation.

Macros were added to Lisp 1.5 in the 1960's, as a simplification of 
FEXPRs. To the best of my knowledge, the macro systems in most or even 
all Lisp dialects have this limitation. There was a paper about 
first-class macros relatively recently, but the approach presented 
either goes in the wrong direction IMHO, or I don't completely get it 
(or both ;).

> At least in the cheesy dynamically
> scoped Lisps I've used, you could do it (macros were simply a special
> lambda that got evaluated twice, the first time with unevaluated args).

FEXPRs in Lisp 1.5 (or nlambda in Interlisp) are functions that don't 
evaluate their arguments, but receive them as unevaluated s-expressions. 
If you can then get hold of the (lexical or dynamic) environment of the 
call site, you can deconstruct the s-expressions and evaluate them again 
in the correct environments. The disadvantage of FEXPRs/nlambda is that 
it makes it basically impossible to statically compile a Lisp dialect 
with such a feature, and dynamic compilation techniques for such Lisp 
dialects haven't been tried yet, if I understand correctly.

The advantage of macros is that they give you a subset of FEXPRs/nlambda 
that can be fully processed at compile time, without creating any 
overhead for runtime. The understanding in the Lisp community seems to 
be that macros cover the most interesting uses of FEXPRs/nlambda, and 
that the remaining uses are not interesting enough. (Personally, I am 
not convinced by such arguments, I would prefer a language in which 
there were FEXPRs/nlambda or similar features available, but I guess 
most Lispers and Schemers would disagree here.)

> It's all a huge mess though.  I have the impression Scheme's hygenic
> macros are much cleaner than what CL's do.  I've never used them though.

Macro hygiene is not really a related topic here. Hygienic macro systems 
may give you better encapsulation to a certain degree, but apart from 
that, hygienic and non-hygienic macro systems essentially have the same 
expressive power. (R5RS Scheme's syntax-rules don't have the full 
expressive power of more complete macro systems, but that's not a 
conceptual restriction, but rather a deliberate design choice of the 
designers of syntax-rules.)

You may prefer either hygienic or non-hygienic macro systems for various 
reasons, but the limitation that you cannot pass macros as first-class 
values remains the same.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xbqdetl6a.fsf@ruckus.brouhaha.com>
Pascal Costanza <··@p-cos.net> writes:
> The understanding in the Lisp
> community seems to be that macros cover the most interesting uses of
> FEXPRs/nlambda, and that the remaining uses are not interesting
> enough. (Personally, I am not convinced by such arguments, I would
> prefer a language in which there were FEXPRs/nlambda or similar
> features available, but I guess most Lispers and Schemers would
> disagree here.)

I remember reading that Steele and Sussman originally considered
making Scheme use lazy evaluation by default, but decided to stay with
strict evaluation for some relatively mundane reason.  I think this is
mentioned in SPJ's retrospective paper on the history of Haskell,
saying the whole development of functional programming might have been
a lot different if Scheme had been nonstrict from the start.
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5i5ht6F3ndnecU1@mid.individual.net>
Paul Rubin wrote:
> Pascal Costanza <··@p-cos.net> writes:
>> The understanding in the Lisp
>> community seems to be that macros cover the most interesting uses of
>> FEXPRs/nlambda, and that the remaining uses are not interesting
>> enough. (Personally, I am not convinced by such arguments, I would
>> prefer a language in which there were FEXPRs/nlambda or similar
>> features available, but I guess most Lispers and Schemers would
>> disagree here.)
> 
> I remember reading that Steele and Sussman originally considered
> making Scheme use lazy evaluation by default, but decided to stay with
> strict evaluation for some relatively mundane reason.

They found it easier to implement lazy evaluation in a strict language, 
rather than to implement strict evaluation in a lazy language.

Which is true: delay/force is more straightforward, at least from an 
implementation point of view, than monads or uniqueness typing. Strict 
evaluation in a lazy language was developed much later, and was by no 
means trivial.

> I think this is
> mentioned in SPJ's retrospective paper on the history of Haskell,
> saying the whole development of functional programming might have been
> a lot different if Scheme had been nonstrict from the start.

Of course. There is also a classic paper by Dan Friedman, "CONS should 
not evaluate its arguments", which came out before Scheme and already 
anticipates the involved issues.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-F50EA4.12242011082007@news-europe.giganews.com>
In article <···············@mid.individual.net>,
 Pascal Costanza <··@p-cos.net> wrote:

> The advantage of macros is that they give you a subset of FEXPRs/nlambda 
> that can be fully processed at compile time, without creating any 
> overhead for runtime. The understanding in the Lisp community seems to 
> be that macros cover the most interesting uses of FEXPRs/nlambda, and 
> that the remaining uses are not interesting enough. (Personally, I am 
> not convinced by such arguments, I would prefer a language in which 
> there were FEXPRs/nlambda or similar features available, but I guess 
> most Lispers and Schemers would disagree here.)

Yes, I would disagree here. The current model of Scheme or
CL is simpler to understand. Code that runs always
follows a simple strict evaluation model. Macros
provide source transformations, independent of that.

> 
> > It's all a huge mess though.  I have the impression Scheme's hygenic
> > macros are much cleaner than what CL's do.  I've never used them though.
> 
> Macro hygiene is not really a related topic here. Hygienic macro systems 
> may give you better encapsulation to a certain degree, but apart from 
> that, hygienic and non-hygienic macro systems essentially have the same 
> expressive power. (R5RS Scheme's syntax-rules don't have the full 
> expressive power of more complete macro systems, but that's not a 
> conceptual restriction, but rather a deliberate design choice of the 
> designers of syntax-rules.)
> 
> You may prefer either hygienic or non-hygienic macro systems for various 
> reasons, but the limitation that you cannot pass macros as first-class 
> values remains the same.
> 
> 
> Pascal

If one understands what a macro (in CL or Scheme)
is (a source transformation in a different stage), one
will understand that it does not make sense to pass them
around that way. It is a limitation that
is the result of some language design decision. Sure
we could relive the evolution of Lisp and question
some of these design choices. But for practical purposes
I would propose to get over it and use CL as it is.

-- 
http://lispm.dyndns.org
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187020962.363027.66230@i13g2000prf.googlegroups.com>
On Aug 11, 1:22 am, Paul Rubin <·············@NOSPAM.invalid> wrote:
> As (iirc) you or Rainer already
> mentioned, you cannot pass a macro to a HOF.

You also can't pass "if" or other syntactic entities.  So what?  (You
also can't shovel snow with a horse.)
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13br2c1lsm71e61@corp.supernews.com>
Pascal Costanza wrote:
> Yes, but you cannot implement a function that does "the right thing" for
> the following form (in a strict language)...

This wasn't about strict languages.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bqq03gitgeb46@corp.supernews.com>
Pascal Costanza wrote:
> This is wrong. WHEN, UNLESS, DOLIST and DOTIMES cannot (!) be
> implemented as higher-order functions. They don't take functions as
> parameters, they take code fragments as parameters.

For example, this:

  (dotimes (j 99) (binomial (/ j 2) 100))

becomes:

  dotimes 99 (\j binomial (j/2) 100)

or:

  dotimes 99 (fun j -> binomial (j/2) 100)

>> To make that smooth, you'd need as little syntactic overhead as
>> possible. Haskell's syntax is minimal enough for that IMHO.
> 
> Macros are not about making syntax smooth, they are about hiding
> implementation details. If you implement the functional equivalents of
> WHEN, UNLESS, DOLIST and DOTIMES, you have to impose the requirement on
> the call sites that they need to wrap code fragments in closures such
> that their evaluation is correctly delayed and can be controlled by
> those functions. This need to wrap code fragments in closures is only
> there for the functionality that _should_ actually be the job of the
> called functions. So higher-order functions actually clutter client code
> with otherwise (!) uninteresting implementation details.

As you can see, minimal clutter in a strict language and no clutter in a
lazy language.

> What is good about syntactic abstractions, as provided by macros, is
> that you can change the implementation underneath without changing the
> call sites. For example, you could expand into goto statements instead
> of higher-order functions to control evaluation of code fragments by
> jumping around them, to avoid the overhead of creating closures
> completely. Such a change of implementation strategy is not possible
> with functional abstractions because you are essentially stuck with
> higher-order functions and closures, and don't have anything else at
> your disposal.

You don't need anything else.

>> I'm not sure how much of that argument transfers to more complicated
>> macros.
> 
> Delaying evaluation of parts of a macro invocation is only one of the
> many possible uses for macros. With macros, you have complete access to
> the whole macro invocation and can fully deconstruct all the code
> fragments in whatever way you like. Closures, functions, and function
> arguments in lazy languages, are in contrast typically opaque: You
> cannot modify them, and you typically cannot even inspect them. So with
> macros, you have a clear increase in expressive power.

Turing argument.

> This is what makes macros especially suitable for embedding
> domain-specific languages: You are not restricted by the preferred
> programming style of the host language at all. In a pure functional
> language, your domain-specific language will essentially always remain a
> functional language. In a pure object-oriented language, your
> domain-specific language will essentially always remain an
> object-oriented language. And so on. As soon as you have macros, you can
> always break out of whatever the underlying language favors.

Lisp macros are just a rudimentary form of term rewriter, limited to
s-exprs.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5i58haF3mkkdmU1@mid.individual.net>
Jon Harrop wrote:
> 
> You don't need anything else.
> 
> 
> Turing argument.
> 

LOL

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187021153.717143.270130@l22g2000prc.googlegroups.com>
On Aug 11, 12:32 am, Jon Harrop <····@ffconsultancy.com> wrote:
> Lisp macros are just a rudimentary form of term rewriter, limited to
> s-exprs.

Lisp macros let one usefully rewrite/generate code.  (The code happens
to be expressed as s-expressions, but that's a separable issue.)  Term
rewriters only rewrite data.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c1a9ogkfh3q95@corp.supernews.com>
Andy Freeman wrote:
> On Aug 11, 12:32 am, Jon Harrop <····@ffconsultancy.com> wrote:
>> Lisp macros are just a rudimentary form of term rewriter, limited to
>> s-exprs.
> 
> Lisp macros let one usefully rewrite/generate code.  (The code happens
> to be expressed as s-expressions, but that's a separable issue.)  Term
> rewriters only rewrite data.

S-exprs are a subset of terms (code is data).

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187033216.433112.66460@o61g2000hsh.googlegroups.com>
> S-exprs are a subset of terms (code is data).

You said in an earlier post:

"Macros just rewrite terms and you can easily write a term rewriter as
a function."

A CL macro is a regular Lisp function. Thanks to the Turing-
completeness of certain rewrite systems, you can view any function as
a term-rewriter. Saying that "macros just rewrite terms" is thus
tautological.

The utility of CL macros doesn't have anything to do with whether they
are equivalent to term rewriting. Their utility stems from the fact
that CL environments allow these functions to operate on the source
code, evaluate them at compile-time, and compile their output inline
with the rest of the program.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c1h456hjf8kaa@corp.supernews.com>
Rayiner Hashem wrote:
> The utility of CL macros doesn't have anything to do with whether they
> are equivalent to term rewriting. Their utility stems from the fact
> that CL environments allow these functions to operate on the source
> code, evaluate them at compile-time, and compile their output inline
> with the rest of the program.

Yes, that is true of most term rewriters, of course, and is a subset of the
functionality provided by OCaml's Camlp4 macros or Mathematica.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187040848.109506.142450@w3g2000hsg.googlegroups.com>
> Yes, that is true of most term rewriters, of course, and is a subset of the
> functionality provided by OCaml's Camlp4 macros or Mathematica.

In what way are Camlp4's macros a superset of Lisp's macros? Unless
you call having to deal with things like AST nodes and a poorly-
integrated external tool "extra features"?
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c1ngco4gnqrd6@corp.supernews.com>
Rayiner Hashem wrote:
>> Yes, that is true of most term rewriters, of course, and is a subset of
>> the functionality provided by OCaml's Camlp4 macros or Mathematica.
> 
> In what way are Camlp4's macros a superset of Lisp's macros?

OCaml takes Lisp's macros and adds:

1. Native support for any data structure including lazy sequences and trees,
not just s-exprs.

2. Extensible lexers.

3. Extensible parsers.

4. Heterogeneous language support.

5. Static checking (e.g. rewrite rule redundancy).

6. Pattern matching.

7. Associativities and precedences.

You can reimplement all of this in Common Lisp, of course: Greenspunning.

> Unless you call having to deal with things like AST nodes

A generalization of s-exprs.

> and a poorly-integrated external tool "extra features"?

Camlp4 is completely integrated and has been for years, of course.

For an example, look at the OCaml Minim interpreter from this thread that
uses an extensible grammar written as a macro to parse a superset of the
Minim syntax and generate an abstract syntax tree for the interpreter. Even
though the target syntax (specified by a Lisper) is a s-expr, the OCaml
code is more concise than the Lisp.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187056747.831988.177750@q75g2000hsh.googlegroups.com>
> 2. Extensible lexers.
> 3. Extensible parsers.
> 7. Associativities and precedences.

All at the cost of having to deal with the conceptual overhead of
lexing and parsing. If you really want custom syntax, then you can
spin this as a win, but the world seems to have decided (*cough* XML
*cough*) that writing a lot of lexers and parsers is a stupid idea,
and we should just pick a syntax and stick with it.

The whole point of sexprs and macros is that they're extremely
lightweight, and thus easy to use everywhere. It's like the same
argument people were making wrt the extra syntax for HOFs in Lisp ---
when a feature has a lot of extra baggage, people won't use it as often. A
full parser-generator carries a lot of baggage. My eyes glazed over
reading the Camlp4 tutorial...

> You can reimplement all of this in Common Lisp, of course: Greenspunning.

I don't think you really have a handle on what Greenspun's rule means.
It's not Greenspunning when a higher-level concept can be expressed
cleanly using a low-level mechanism. For example, I'd argue that
integrated database or networking libraries are far more important
than pattern-matching for the majority of programmers. Is it
"Greenspunning" to implement database or network functionality outside
of the language core?

> Camlp4 is completely integrated and has been for years, of course.

For small values of "integrated". The camlp4 and camlp5 programs are
preprocessors, with some magic in the driver script to integrate them
(with lots of provisos). The camlp4 tutorial on the INRIA website
warns right off the bat that the preprocessor uses a different parser than
the compiler, and thus behaves slightly differently, particularly wrt
error messages. Looking at the latest O'Caml source code, this still
seems to be the case. The tutorial also notes how you can't interleave
macros and source code using the compiler, but must put them in
separate object files. These sound like pure implementation
limitations, though.

> For an example, look at the OCaml Minim interpreter from this thread that
> uses an extensible grammar written as a macro to parse a superset of the
> Minim syntax and generate an abstract syntax tree for the interpreter. Even
> though the target syntax (specified by a Lisper) is a s-expr, the OCaml
> code is more concise than the Lisp.

It seems to me that this is just because camlp4 has a parser generator
built-in, and Lisp doesn't. Me, I saw the BNF in your post and
immediately moved on. Why in god's name would I use an infix syntax
for my Lisp DSL? I mean, there isn't anything stopping me, there are
lot's of parser-generators for Lisp after all, but it's totally
pointless extra complexity!
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c2cftt318hoee@corp.supernews.com>
Rayiner Hashem wrote:
>> 2. Extensible lexers.
>> 3. Extensible parsers.
>> 7. Associativities and precedences.
> 
> All at the cost of having to deal with the conceptual overhead of
> lexing and parsing. If you really want custom syntax, then you can 
> spin this as a win, but the world seems to have decided (*cough* XML
> *cough*) that writing a lot of lexers and parsers is a stupid idea,
> and we should just pick a syntax and stick with it.

Think of as many grammatical file formats as you can. What proportion are
XML? For me, virtually none. SVG is the only one I've done much work with
and XML is just a wrapper for custom grammars encoding data in strings:

       <path style="opacity:0.35;stroke:none;"           
d="M221.688,252.04c0-15.085-28.926-27.313-64.607-27.313s-64.607,12.2
29-64.607,27.313c0,5.362,3.669,10.359,9.983,14.579c-17.49,3.476-30.47,7.433-30.4
7,7.433l-24.77,62.429l33.686-13.637   
c38.106,5.626,125.094-24.09,109.44-47.386
c18.779-4.777,31.345-13.477,31.345-23.418z"/>

> The whole point of sexprs and macros is that they're extremely
> lightweight, and thus easy to use everywhere. It's like the same
> argument people were making wrt the extra syntax for HOFs in Lisp ---
> when a feature has a of extra baggage, people won't use it as often.

Except that OCaml's macros are also lightweight. Look at the Minim
interpreters. Even though the Minim language uses Lisp's grammar, the Lisp
implementations are still all much longer than the OCaml despite its
alleged "baggage".

> A full parser-generator carries a lot of baggage.

Functionality, not baggage.

>> You can reimplement all of this in Common Lisp, of course: Greenspunning.
> 
> I don't think you really have a handle on what Greenspun's rule means.
> It's not Greenspunning when a higher-level concept can be expressed
> cleanly using a low-level mechanism. For example, I'd argue that
> integrated database or networking libraries are far more important
> than pattern-matching for the majority of programmers.

I am aware that networking and databases are extremely popular but I must
confess I don't understand why. I've used both before but very rarely
compared to lexers, parser, pretty printer, pattern matching, graphics and
so on.

> Is it "Greenspunning" to implement database or network functionality
> outside of the language core? 

If you advocate doing so for your one program by lashing together an ad-hoc,
informally-specified, bug-ridden and slow implementation of half of .NET,
then hell yes you're Greenspunning.

>> Camlp4 is completely integrated and has been for years, of course.
> 
> For small values of "integrated". The camlp4 and camlp5 programs are
> preprocessors, with some magic in the driver script integrate them 
> (with lots of provisos). The camlp4 tutorial on the INRIA website
> warns off the bat that the preprocessor uses a different parser than
> the compiler, and thus behave slightly differently, particularly wrt 
> error messages.

Yes, the extensible parser provided by Camlp4 is LL whereas the default
front-end of the OCaml compiler is LALR.

> Looking at the latest O'Caml source code, this still 
> seems to be the case. The tutorial also notes how you can't interleave 
> macros and source code using the compiler, but must put them in
> separate object files.

For batch compilation, yes.

> These sounds like pure implementation limitations, though.

Using OCaml's macros from the top-level is exactly the same as using Lisp's
macros from the REPL. The effect of preprocessing only affects batch
compilation.

The following excerpt from a running OCaml top-level demonstrates
interactive extension of the OCaml grammar by adding a try..finally
construct:

$ ocaml camlp4oof.cma
        Objective Caml version 3.10.0

        Camlp4 Parsing version 3.10.0

# try 3 finally print_endline "foo";;
Parse error: "with" expected after [sequence] (in [expr])
# open Camlp4.PreCast.Syntax;;
# EXTEND Gram
   expr: LEVEL "top"
   [[ "try"; f=sequence; "finally"; g=expr ->
        <:expr<
          ((function
            | `Val v, g -> g(); v
            | `Exn e, g -> (try g() with _ -> ()); raise e)
                (try `Val($f$) with e -> `Exn e), (fun () -> $g$)))
        >>]];
  END;;
- : unit = ()
# try 3 finally print_endline "foo";;
foo
- : int = 3

>> For an example, look at the OCaml Minim interpreter from this thread that
>> uses an extensible grammar written as a macro to parse a superset of the
>> Minim syntax and generate an abstract syntax tree for the interpreter.
>> Even though the target syntax (specified by a Lisper) is a s-expr, the
>> OCaml code is more concise than the Lisp.
> 
> It seems to me that this is just because camlp4 has a parser generator
> built-in, and Lisp doesn't. Me, I saw the BNF in your post and
> immediately moved on. Why in god's name would I use an infix syntax
> for my Lisp DSL?

1. Infix was specified by your client.
2. Infix is more concise or clearer for that domain.
3. Infix is the convention for that domain.
...

Conversely, why in god's name would you prohibit free form DSLs?

> I mean, there isn't anything stopping me, there are 
> lot's of parser-generators for Lisp after all, but it's totally
> pointless extra complexity!

I use them all the time precisely because they're integrated into the
language and are so easy to use.

Look at our vector graphics software, for example:

  http://www.ffconsultancy.com/products/smoke_vector_graphics/

There are front-ends for PostScript and SVG. PostScript is implemented as a
typical interpreter. SVG is handled using an XML parser and then custom
parsers for the string formats encoded in the XML. The mathematical
typesetter parses both LaTeX and Mathematica formats. A separate 3D
visualization tool handles Mathematica's graphics, parsing them from
Mathematica expressions.

An OCaml program processes the LyX source of my book OCaml for Scientists,
color syntax highlighting the results.

The PostScript, PDF, SVG, LaTeX, Mathematica and LyX formats all have
something in common: they are not s-exprs.

The last time I wrote something handling s-exprs it was for an undocumented
format used internally by some company software that was probably written
by a bloody Lisper. We replaced the s-exprs with XML but the XML was used
to encode XMLRPC that in turn encoded a type system. So this turned out to
be another XML-based format requiring lots of parsing on top of the XML.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187102921.306705.310450@k79g2000hse.googlegroups.com>
> Think of as many grammatical file formats as you can. What proportion are
> XML? For me, virtually none. SVG is the only one I've done much work with
> and XML is just a wrapper for custom grammars encoding data in strings:

Uh, the internet is based on XHTML and its ilk. That covers most of
the documents being created today. Office is now XML-based. That
covers most of the rest.

> Except that OCaml's macros are also lightweight. Look at the Minim
> interpreters. Even though the Minim language uses Lisp's grammar, the Lisp
> implementations are still all much longer than the OCaml despite its
> alleged "baggage".

I didn't see any Lisp entry that took advantage of a canned parser-
generator and pattern-matching package like you did.

> Functionality, not baggage.

Tell that to the millions of people buying iPods...

> I am aware that networking and databases are extremely popular but I must
> confess I don't understand why. I've used both before but very rarely
> compared to lexers, parser, pretty printer, pattern matching, graphics and
> so on.

Because there is a lot of money in networking and databases, much less
in graphics, and not so much in compilers.

> If you advocate doing so for your one program by lashing together an ad-hoc,
> informally-specified, bug-ridden and slow implementation of half of .NET,
> then hell yes you're Greenspunning.

Who says you have to do it for one program? Networking and databases
are generally-applicable tools. One can quite easily build them as
generally-usable libraries. The same is true for pattern matching.
Consider a related example: iteration constructs. Iteration constructs
are built-in to most languages, but in Lisps, they're implemented as
macros. Far from being ad-hoc and bug-ridden, CL and Scheme have some
of the best iteration facilities available.

Look at it another way. If pattern-matching were an official part of
CL, how do you think it'd be implemented? As part of the compiler?
What would be the point? It would likely be implemented the same way
LOOP is, as a macro, with some attention to making sure the compiler
generated good code for it.

> Yes, the extensible parser provided by Camlp4 is LL whereas the default
> front-end of the OCaml compiler is LALR.

Right, because it's completely not integrated. Look at the source code,
it's a completely separate thing.

> Using OCaml's macros from the top-level is exactly the same as using Lisp's
> macros from the REPL. The effect of preprocessing only affects batch
> compilation.

So it's only "integrated" if you type all your code into the top-
level? What happens when I want to use the compiler?

> 1. Infix was specified by your client.
> 2. Infix is more concise or clearer for that domain.
> 3. Infix is the convention for that domain.

This stuff doesn't really matter. DSLs are for programmers, not the
clients or end-users. Lispers aren't going to want an alien infix
language in the middle of their Lisp code. This is not just a Lisp
thing, either. Look at something like LINQ/XML in C#. It's designed
for manipulating XML, but it looks like C#, not like XML.

There is a place for lexers and parsers and the attendant tools. If
you're writing a TeX replacement, you're gonna need to parse TeX.
However, that's all orthogonal to DSLs and macros.

> I use them all the time precisely because they're integrated into the
> language and are so easy to use.

Instead of complaining that macros don't do what they're not intended
to do, have you actually looked at CL packages that are designed to
express parsers? Most even use macros to integrate parser definitions
into the program.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <dp1we683mb.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> Yes, the extensible parser provided by Camlp4 is LL whereas the default
>> front-end of the OCaml compiler is LALR.
>
> Right, because its completely not integrated. Look at the source code,
> it's a completely separate thing.
>
>> Using OCaml's macros from the top-level is exactly the same as using Lisp's
>> macros from the REPL. The effect of preprocessing only affects batch
>> compilation.
>
> So it's only "integrated" if you type all your code into the top-
> level? 

No. Read the fine manual.

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c4f8m8igui9d1@corp.supernews.com>
Rayiner Hashem wrote:
>> Think of as many grammatical file formats as you can. What proportion are
>> XML? For me, virtually none. SVG is the only one I've done much work with
>> and XML is just a wrapper for custom grammars encoding data in strings:
> 
> Uh, the internet is based on XHTML and it's ilk.

TCP/IP, UDP, HTTP, FTP, SMTP and POP are not "based on XHTML and it's ilk".

I assume your quip was supposed to refer to the web rather than the
internet, but even that isn't true in any meaningful sense. The source code
for most web pages is not XHTML. Moreover, much of the content is in DIVX,
MPEG, AVI, PNG, GIF, JPEG, Flash, JavaScript, Java and a wide variety of
other formats that are in no way "based on XHTML and it's ilk".

> That covers most of the documents being created today.

In other words, if you ignore all non-XML documents then you only need to
handle XML. Should anyone want to analyze Wikipedia articles or anything in
PDF (like, say, most of science) they are screwed.

> Office is now XML-based.

Several formats are now XML based. That doesn't help if you want to automate
the handling of the other formats.

>> If you advocate doing so for your one program by lashing together an
>> ad-hoc, informally-specified, bug-ridden and slow implementation of half
>> of .NET, then hell yes you're Greenspunning.
> 
> Who says you have to do it for one program?...

Your intentions are admirable but you are joining the ever-growing ranks of
Lisp programmers who claim that problems can be solved easily but never
actually solve them. Look at Andre Thieme and Dan Bensens pattern matching
libraries, for example. They were both announced months ago and deemed to
be trivial...

> Look at it another way. If pattern-matching were an official part of
> CL, how do you think it'd be implemented? As part of the compiler?
> What would be the point?

You address this in your next point:

> It would likely be implemented the same way 
> LOOP is, as a macro, with some attention to making sure the compiler
> generated good code for it.

"with some attention to making sure the compiler generated good code for it"

>> Yes, the extensible parser provided by Camlp4 is LL whereas the default
>> front-end of the OCaml compiler is LALR.
> 
> Right, because its completely not integrated. Look at the source code,
> it's a completely separate thing.

The ARM and AMD64 backends are as much "completely separate things" as the
LALR and extensible LL frontends, yes.

>> Using OCaml's macros from the top-level is exactly the same as using
>> Lisp's macros from the REPL. The effect of preprocessing only affects
>> batch compilation.
> 
> So it's only "integrated" if you type all your code into the top-
> level?

For what meaning of "integrated"?

> What happens when I want to use the compiler? 

It works?

>> 1. Infix was specified by your client.
>> 2. Infix is more concise or clearer for that domain.
>> 3. Infix is the convention for that domain.
> 
> This stuff doesn't really matter.

The specification given to you by your client, clarity and convention don't
matter?

> DSLs are for programmers, not the clients or end-users.

If your program is never going to see the light of day then that might be a
fair assumption. If other programmers are going to maintain it, the client
wants it or it is a product for end-users then you should try harder and do
better. If you choose the right tools, it is easy.

> Lispers aren't going to want an alien infix language in the middle of
> their Lisp code. 

Like regexps?

Also, note that you have replaced "non-sexpr" with "alien infix" for no
logical reason.

> This is not just a Lisp 
> thing, either. Look at something like LINQ/XML in C#. It's designed
> for manipulating XML, but it looks like C#, not like XML.

Yes.

> There is a place for lexers and parsers and the attendent tools. If
> you're writing a TeX replacement, you're gonna need to parse TeX.
> However, that's all orthogonal to DSLs and macros.

How is the DSL Tex orthogonal to DSLs?

>> I use them all the time precisely because they're integrated into the
>> language and are so easy to use.
> 
> Instead of complaining...

That was not a complaint.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187143152.170821.192510@q4g2000prc.googlegroups.com>
> Your intentions are admirable but you are joining the ever-growing ranks of
> Lisp programmers who claim that problems can be solved easily but never
> actually solve them.

That's really irrelevant to this discussion. Pattern matching
facilities could be implemented in Lisp, quite cleanly. The fact that
nobody is interested in doing so, well, maybe that's because Lispers
don't feel compelled to express everything as a pattern match. Even
after all this discussion, your pattern matching example is merely
back to almost the brevity of my Lisp DSL, only with more weird
punctuation and more dependent on the compiler for optimization. So
excuse me if I don't exactly feel compelled to run out and implement a
pattern-matching library.

> "with some attention to making sure the compiler generated good code for it"

Note that this does not imply building in pattern-matching support
into the compiler. It's just a general point: when you express a low-
level feature in a high-level language, you have to design the high-
level-feature to generate code well-tuned for the strengths and
weaknesses of the underlying compiler. Similar points apply to CLOS or
LOOP for that matter.

> The ARM and AMD64 backends are as much "completely separate things" as the
> LALR and extensible LL frontends, yes.

The ARM and AMD64 backends are compiled into ocamlopt. camlp4 is a
separate program called from the compiler driver.

> For what meaning of "integrated"?

Completely transparent, like in Lisp. I can type a macro into the
REPL, and I can call it from a file I compile. I can have a macro
local to a function that uses it and can call COMPILE-FILE on the
thing. It's all about lowering the barrier to using syntactic
extension.

> If your program is never going to see the light of day then that might be a
> fair assumption. If other programmers are going to maintain it, the client
> wants it or it is a product for end-users then you should try harder and do
> better. If you choose the right tools, it is easy.

Do your end users read and edit your source code?

> Also, note that you have replaced "non-sexpr" with "alien infix" for no
> logical reason.

In the context of Lisp code, non-sexpr is indeed alien. In the context
of C# code, sexpr's would be alien too.

> How is the DSL Tex orthogonal to DSLs?

You seem to fail to understand the point of macros in a very
fundamental way. They are not intended to express file formats
external to the program. They are used to express domain-specific
concepts within the source code of the program. In the context of a
TeX tool, you would use a lexer/parser to convert TeX source into some
IR, and you could very well write a typesetting DSL to operate on that
IR within your program. The tool appropriate for the first use isn't
necessarily appropriate for the second use.

What it seems you're looking for is really a lexer/parser generator
integrated with READ. That would actually be quite a neat idea, but
it's orthogonal to macros. Lisp macros would indeed operate perfectly
happily on any AST nodes returned from such a reader!
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c4q7u5qjs7oe4@corp.supernews.com>
Rayiner Hashem wrote:
> Even after all this discussion, your pattern matching example is merely
> back to almost the brevity of my Lisp DSL,

This is subjective speculation, of course, but even my original code
surpassed the brevity of yours and will be an order of magnitude faster and
so robust there is no need to unit test individual functions.

> So excuse me if I don't exactly feel compelled to run out and implement a
> pattern-matching library.

No need to implement it yourself: use a modern functional programming
language to develop better code faster.

>> The ARM and AMD64 backends are as much "completely separate things" as
>> the LALR and extensible LL frontends, yes.
> 
> The ARM and AMD64 backends are compiled into ocamlopt.

Which is separate from ocaml, ocamlnat and ocamlc.

> I can type a macro into the REPL, and I can call it from a file I compile.

Like this:

# #load "pa_tryfinally.ml";;
...
# try 3 finally print_endline "foo";;
foo
- : int = 3

or this:

# #load "pa_tryfinally.cmo";;
# try 3 finally print_endline "foo";;
foo
- : int = 3

> It's all about lowering the barrier to using syntactic extension.

What barrier?

>> If your program is never going to see the light of day then that might be
>> a fair assumption. If other programmers are going to maintain it, the
>> client wants it or it is a product for end-users then you should try
>> harder and do better. If you choose the right tools, it is easy.
> 
> Do your end users read and edit your source code?

Absolutely. Most of what we sell is in source form. I couldn't very well
turn around to Wolfram Research and say "I laugh at your alien infix syntax
for Mathematica and will implement something very similar to Lisp instead".

>> Also, note that you have replaced "non-sexpr" with "alien infix" for no
>> logical reason.
> 
> In the context of Lisp code, non-sexpr is indeed alien. In the context
> of C# code, sexpr's would be alien too.

They don't need to be alien.

>> How is the DSL Tex orthogonal to DSLs?
> 
> You seem to fail to understand the point of macros in a very
> fundamental way.

That's because you're assuming that all macro systems have the limitations
of Lisp's.

> They are not intended to express file formats 
> external to the program. They are used to express domain-specific 
> concepts within the source code of the program. In the context of a
> TeX tool, you would use a lexer/parser to convert TeX source into some
> IR, and you could very well write a typesetting DSL to operate on that
> IR within your program. The tool appropriate for the first use isn't
> necessarily appropriate for the second use.

Yes, those are all limitations of Lisp macros. Those statements do not apply
to macros in general. OCaml's macro system, for example, has none of those
limitations.

> What it seems you're looking for is really a lexer/parser generator
> integrated with READ. That would actually be quite a neat idea,

Absolutely.

> but it's orthogonal to macros.

As Camlp4 has shown, if you integrate these two sets of functionality the
result is more than the sum of its parts. You can lex, parse, rewrite
(using pattern matching) and code generate in a single step for any
language and apply the result to external or embedded code.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187188214.749257.278760@19g2000hsx.googlegroups.com>
> This is subjective speculation, of course, but even my original code
> surpassed the brevity of yours and will be an order of magnitude faster and
> so robust there is no need to unit test individual functions.

You're delusional. The final syntax you ended up showing was something
like:

| (R32|M32), R32 -> emit (op 0x80 => r `f => rm `g)

The equivalent in the Lisp syntax is both more compact and more
faithful to the Intel manual:

((rm32 r32) (#x80 /r /rm))

You might save a few lines here and there combining the left-hand-side
patterns, but at the cost of having to add extra "64-bit" symbols on
the right-hand side, and further deviating from the Intel syntax.

This is not including the code needed for whatever elaborate type
theory is necessary to express facts like:

/0 /r is an invalid right-hand-side (/0 implies /r)
#x99 #x80 is an invalid opcode sequence (two-byte opcodes start with
#x0F)
etc.

And it *still* doesn't feed patterns into an auto-tester. Despite your
ridiculous assertions, testing is indeed necessary for every single
pattern, because no type theory is going to catch the fact that you
mistyped #x85 as an opcode when the instruction really has an opcode
of #x82. Entertainingly, while I was writing the assembler, the auto-
tester exposed several inconsistencies and typos in AMD's version of
the manual. I guess erroneous specifications are as real a danger as
implementation bugs after all!

> > I can type a macro into the REPL, and I can call it from a file I compile.
>
> Like this:
>
> # #load "pa_tryfinally.ml";;
> ...
> # try 3 finally print_endline "foo";;
> foo
> - : int = 3
> or this:
>
> # #load "pa_tryfinally.cmo";;
> # try 3 finally print_endline "foo";;
> foo
> - : int = 3

1) You have things backwards. I wanted to type the macro into the
REPL, and call it from a compiled file.

2) Neither of these invoke the native-code compiler.

3) Neither of these examples changes the fact that all of this is
accomplished by calling the preprocessor inside the driver, not having
an integrated compiler/macro system.

> As Camlp4 has shown, if you integrate these two sets of functionality the
> result is more than the sum of its parts. You can lex, parse, rewrite
> (using pattern matching) and code generate in a single step for any
> language and apply the result to external or embedded code.

I'm done with this. Come back when you figure out what I meant when I said
that Lisp's macro system would happily handle whatever objects READ
threw at it. Learn about the distinctions between the macro system,
the backquote mechanism, and the reader. Obviously you don't have even
a basic understanding of the macro facility, or its place within a
Lisp system. You couldn't even figure out the trivial two-line "foo"
macro I showed earlier...
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c6den74r8am7d@corp.supernews.com>
Rayiner Hashem wrote:
>> This is subjective speculation, of course, but even my original code
>> surpassed the brevity of yours and will be an order of magnitude faster
>> and so robust there is no need to unit test individual functions.
> 
> You're delusional. The final syntax you ended showing was something
> like:
> 
> | (R32|M32), R32 -> emit (op 0x80 => r `f => rm `g)
>
> The equivalent in the Lisp syntax is both more compact and more
> faithful to the Intel manual:
> 
> ((rm32 r32) (#x80 /r /rm))

The majority of your code gets smaller, like these:

(define-xm128-encoder addpd #x66 #x0F #x58)
(define-xm128-encoder addps #x0F #x58)
(define-xm128-encoder addsd #xF2 #x0F #x58)
(define-xm128-encoder addss #xF3 #x0F #x58)

(define-xm128-encoder addsubpd #x66 #x0F #xD0)
(define-xm128-encoder addsubps #xF2 #x0F #xD0)

(define-xm128-encoder andnpd #x66 #x0F #x55)
(define-xm128-encoder andnps #x0F #x55)
(define-xm128-encoder andpd #x66 #x0F #x54)
(define-xm128-encoder andps #x0F #x54)

(define-cmp-encoder cmppd #x66 #x0F #xC2)
(define-cmp-encoder cmpps #x0F #xC2)
(define-cmp-encoder cmpsd #xF2 #x0F #xC2)
(define-cmp-encoder cmpss #xF3 #x0F #xC2)

(define-xm128-encoder comisd #x66 #x0F #x2F)
(define-xm128-encoder comiss #x0F #x2F)

(define-xm64-encoder cvtdq2pd #xF3 #x0F #xE6)
(define-xm128-encoder cvtdq2ps #x0F #x5B)
(define-xm128-encoder cvtpd2dq #xF2 #x0F #xE6)

; cvtpd2pi

(define-xm128-encoder cvtpd2ps #x66 #x0F #x5A)

; cvtpi2pd
; cvtpi2ps

(define-xm128-encoder cvtps2dq #x66 #x0F #x5B)
(define-xm64-encoder cvtps2pd #x0F #x5A)

; cvtps2pi

(define-rx64-encoder cvtsd2si #xF2 #x0F #x2D)
(define-xm64-encoder cvtsd2ss #xF2 #x0F #x5A)
(define-xr-encoder cvtsi2sd #xF2 #x0F #x2A)
(define-xr-encoder cvtsi2ss #xF3 #x0F #x2A)
(define-xm32-encoder cvtss2sd #xF3 #x0F #x5A)
(define-rx32-encoder cvtss2si #xF3 #x0F #x2D)
(define-xm128-encoder cvttpd2dq #x66 #x0F #xE6)

; cvtpd2pi

(define-xm128-encoder cvttps2dq #xF3 #x0F #x5b)

; cvttpd2pi

(define-rx64-encoder cvttsd2si #xF2 #x0F #x2C)
(define-rx32-encoder cvttss2si #xF3 #x0F #x2C)

(define-xm128-encoder divpd #x66 #x0F #x5E)
(define-xm128-encoder divps #x0F #x5E)
(define-xm128-encoder divsd #xF2 #x0F #x5E)
(define-xm128-encoder divss #xF3 #x0F #x5E)

; fxrstor
; fxsave

(define-xm128-encoder haddpd #x66 #x0F #x7C)
(define-xm128-encoder haddps #xF2 #x0F #x7C)

(define-xm128-encoder hsubpd #x66 #x0F #x7D)
(define-xm128-encoder hsubps #xF2 #x0F #x7D)

; lddqu

(define-encoder ldmxcsr (source)
  ((m32) (#x0F #xAE /2)))

(define-x-encoder maskmovdqu #x66 #x0F #xF7)

(define-xm128-encoder maxpd #x66 #x0F #x5F)
(define-xm128-encoder maxps #x0F #x5F)
(define-xm128-encoder maxsd #xF2 #x0F #x5F)
(define-xm128-encoder maxss #xF3 #x0F #x5F)

(define-xm128-encoder minpd #x66 #x0F #x5D)
(define-xm128-encoder minps #x0F #x5D)
(define-xm128-encoder minsd #xF2 #x0F #x5D)
(define-xm128-encoder minss #xF3 #x0F #x5D)

(define-mov0-128-encoder movapd (#x66 #x0F #x28) (#x66 #x0F #x29))
(define-mov0-128-encoder movaps (#x0F #x28) (#x0F #x29))

> This is not including the code needed for whatever elaborate type
> theory is necessary to express facts like:
> 
> /0 /r is an invalid right-hand-side (/0 implies /r)

Already checked.

> #x99 #x80 is an invalid opcode sequence (two-byte opcodes start with
> #x0F)

Make n-byte opcodes type constructors with the 0x0F implicit.

> And it *still* doesn't feed patterns into an auto-tester.

But it does convey static type information.

>> Like this:
>>
>> # #use "pa_tryfinally.ml";;
>> ...
>> # try 3 finally print_endline "foo";;
>> foo
>> - : int = 3
>> or this:
>>
>> # #load "pa_tryfinally.cmo";;
>> # try 3 finally print_endline "foo";;
>> foo
>> - : int = 3
> 
> 1) You have things backwards. I wanted to type the macro into the
> REPL, and call it from a compiled file.

$ ocaml camlp4oof.cma
        Objective Caml version 3.10.0

        Camlp4 Parsing version 3.10.0

# open Camlp4.PreCast.Syntax;;
#   EXTEND Gram
    expr: LEVEL "top"
    [[ "try"; f=sequence; "finally"; g=expr ->
         <:expr<
           ((function
             | `Val v, g -> g(); v
             | `Exn e, g -> (try g() with _ -> ()); raise e)
              ((try `Val $f$ with e -> `Exn e), (fun () -> $g$)))
         >>]];
  END;;
- : unit = ()
# #use "tryfinally_test.ml";;
        Camlp4 Parsing version 3.10.0

Foo!
Exception: Pervasives.Exit.

> 2) Neither of these invoke the native-code compiler.

Actually they both were: ocamlnat.

> 3) Neither of these examples changes the fact that all of this is
> accomplished by calling the preprocessor inside the driver, not having
> an integrated compiler/macro system.

In other words, they've placed the front end of the compiler at the front.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187200539.389916.170330@d55g2000hsg.googlegroups.com>
> The majority of your code gets smaller, like these:
> (define-xm128-encoder addpd #x66 #x0F #x58)

How do these get smaller? I'm curious to hear what exactly you think
these lines are doing...

> But it does convey static type information.

Not very reassuring, when the majority of the error-prone code here is
in the instruction patterns and opcodes, which cannot be checked
statically in any comprehensive way. You could check them for some
sort of well-formedness, but there are enormous numbers of well-formed
patterns that are not valid in the x86 instruction set, and the
compiler has no way to know which ones those are.

> # #use "tryfinally_test.ml";;
>         Camlp4 Parsing version 3.10.0
>
> Foo!
> Exception: Pervasives.Exit.
>
> > 2) Neither of these invoke the native-code compiler.
>
> Actually they both were: ocamlnat.

Ok. Ocamlnat seems to be a fairly recent addition. Is ocamlnat good
enough to obsolete the batch compiler? Ie: do people actually use it
for deployed code? It seems to me that most of the complexity of
Camlp4 in the context of the batch compiler stems from the obvious
inability of the batch compiler to interleave compilation with
execution.

> In other words, they've placed the front end of the compiler at the front.

Camlp4 is invoked by the compiler driver as a preprocessor that
bypasses the regular Ocaml frontend. What exactly about this
arrangement screams "integrated" to you? In Lisp, the macro-expander
is completely integrated with the compiler, as much as the parser or
optimizer. The Lisp compiler can (and does) interleave reading, macro-
expansion, optimization, code generation, and code execution.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cantsd9hv5vc5@corp.supernews.com>
Rayiner Hashem wrote:
>> The majority of your code gets smaller, like these:
>> (define-xm128-encoder addpd #x66 #x0F #x58)
> 
> How do these get smaller?

Using currying. I posted the code earlier.

>> But it does convey static type information.
> 
> Not very reassuring, when the majority of the error-prone code here is
> in the instruction patterns and opcodes, which cannot be checked
> statically in any comprehensive way. You could check them for some
> sort of well-formedness, but there are enormous numbers of well-formed
> patterns that are not valid in the x86 instruction set, and the
> compiler has no way to know which ones those are.

Yes.

>> # #use "tryfinally_test.ml";;
>>         Camlp4 Parsing version 3.10.0
>>
>> Foo!
>> Exception: Pervasives.Exit.
>>
>> > 2) Neither of these invoke the native-code compiler.
>>
>> Actually they both were: ocamlnat.
> 
> Ok. Ocamlnat seems to be a fairly recent addition. Is ocamlnat good
> enough to obsolete the batch compiler?

They serve different purposes (on-line and batch) so neither can obsolete
the other. Most people want to generate standalone executables from their
OCaml sources, so they use a batch compiler and the top-level is there
primarily for testing.

> Ie: do people actually use it for deployed code?

Several people are singing its praises. I think scientists will adopt it
because coding in the REPL is preferable when you're massaging data.

> It seems to me that most of the complexity of 
> Camlp4 in the context of the batch compiler stems from the obvious
> inability of the batch compiler to interleave compilation with
> execution.

Yes, I think you must put the macros in separate source files and compile
incrementally. I haven't found that to be a problem in practice as my
syntax extensions tend to apply to source files anyway.

>> In other words, they've placed the front end of the compiler at the
>> front.
> 
> Camlp4 is invoked by the compiler driver as a preprocessor that
> bypasses the regular Ocaml frontend. What exactly about this
> arrangement screams "integrated" to you?

By "integrated", I mean Camlp4 is part of the OCaml distribution and it
works seamlessly with all other parts of the OCaml system. I would refer to
the ability to alter the compiler's front-end as "extensibility".

> In Lisp, the macro-expander 
> is completely integrated with the compiler, as much as the parser or
> optimizer. The Lisp compiler can (and does) interleave reading, macro-
> expansion, optimization, code generation, and code execution.

I wasn't clear when I said that. I meant that these steps can be integrated
into a single block of code with a concise syntax.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187356939.176836.326030@d55g2000hsg.googlegroups.com>
> >> The majority of your code gets smaller, like these:
> >> (define-xm128-encoder addpd #x66 #x0F #x58)
>
> > How do these get smaller?
>
> Using currying. I posted the code earlier.

If you define "define-xm128-encoder" as a function that returns a
function implementing the pattern match, closing over the opcode
parameters, it looks like:

func addpd-encoder = define-xm128-encoder #x66 #x0F #x58

How is this shorter?

Incidentally, even this expression isn't the same. The above macro
also enters an association between ":addpd" and "addpd-encoder" into a
hash-table used for processing assembly source, and enters the pattern
into a database for the auto-tester. Even you seem to begrudgingly
admit that it probably is a good idea to actually test if addpd's
opcode really is #x66 #x0F #x58...

> By "integrated", I mean Camlp4 is part of the OCaml distribution and it
> works seamlessly with all other parts of the OCaml system. I would refer to
> the ability to alter the compiler's front-end as "extensibility".
>

If being forced to put syntax extensions into a separate file or
having different source-layout rules for the batch compiler and
incremental compiler is "seamless"... Allegro and LispWorks will
happily dump executables without any such restrictions.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cbra3nhpl8l53@corp.supernews.com>
Rayiner Hashem wrote:
>> >> The majority of your code gets smaller, like these:
>> >> (define-xm128-encoder addpd #x66 #x0F #x58)
>>
>> > How do these get smaller?
>>
>> Using currying. I posted the code earlier.
> 
> If you define "define-xm128-encoder" as a function that returns a
> function implementing the pattern match, closing over the opcode
> parameters, it looks like:
> 
> func addpd-encoder = define-xm128-encoder #x66 #x0F #x58
> 
> How is this shorter?

Several reasons:

1. It's "let" not "func".

2. Factor your identifiers: define-xm128-encoder -> Encoder.Define.xm128.
Then open Encoder.Define.

3. Define addpd and friends simultaneously using a tuple.

> Incidentally, even this expression isn't the same. The above macro
> also enters an association between ":addpd" and "addpd-encoder" into a
> hash-table used for processing assembly source, and enters the pattern
> into a database for the auto-tester.

Do that with a HOF.

> Even you seem to begrudgingly 
> admit that it probably is a good idea to actually test if addpd's
> opcode really is #x66 #x0F #x58...

They're magic numbers. I'd load them from a file, preferably the same file
used by another assembler.

>> By "integrated", I mean Camlp4 is part of the OCaml distribution and it
>> works seamlessly with all other parts of the OCaml system. I would refer
>> to the ability to alter the compiler's front-end as "extensibility".
> 
> If being forced to put syntax extensions into a separate file or
> having different source-layout rules

Different source layout rules?

> for the batch compiler and incremental compiler is "seamless"...

I prefer functions to macros so I'm happy to sacrifice funcall for staged
compilation.

> Allegro and LispWorks will happily dump executables without any such
> restrictions. 

For a small fee.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187378712.366958.97290@50g2000hsm.googlegroups.com>
> 1. It's "let" not "func".

My pseudo-code, my syntax.

> 2. Factor your identifiers: define-xm128-encoder -> Encoder.Define.xm128.
> Then open Encoder.Define.

I can do this in Lisp too, what's your point?

> 3. Define addpd and friends simultaneously using a tuple.

I could write a Lisp macro that would condense these to:

(define-xm128-encoders
  addpd #x66 #x0F #x58
  addps #x67 #x0F #x58
  ...)

It's like a 3-liner, and it's as concise as you're going to get.
You're talking about trivial increases in concision at this point.

> Do that with a HOF.

The auto-tester needs to have, in some manipulable form, the left and
right hand sides of the pattern matches. Eg, it should see:

| (R32|M32), IMM32

and be able to generate source code like:

(:add (:eax 1024))

I don't see how HOFs can achieve this effect.

> They're magic numbers. I'd load them from a file, preferably the same file
> used by another assembler.

This makes your matching syntax explode. Plus, you lose the
specialization and optimization opportunities that come from encoding
them as constants.

Look, you're seeing pieces of a solution that happen to make sense to
you in O'Caml, but I don't think any of all those pieces compose while
meeting the required implementation restrictions. Eg:

so you have

let addpd-encoder = xm128 #x66 #x0F #x58

Can you maintain this concision loading the constants from a file?
While entering patterns into the auto-tester? While associating the
encoder function with the symbol used to represent it in the source
code? You keep hand-waving these "solutions", but each one seems to
just move complexity somewhere else.

> Different source layout rules?

The top-level lets you interleave macros and code, batch compiler
doesn't.
From: Andrew Reilly
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <pan.2007.08.15.06.45.40.112097@areilly.bpc-users.org>
On Wed, 15 Aug 2007 01:59:12 +0000, Rayiner Hashem wrote:

>> Your intentions are admirable but you are joining the ever-growing ranks of
>> Lisp programmers who claim that problems can be solved easily but never
>> actually solve them.
> 
> That's really irrelevant to this discussion. Pattern matching
> facilities could be implemented in Lisp, quite cleanly. The fact that
> nobody is interested in doing so, well, maybe that's because Lispers
> don't feel compelled to express everything as a pattern match. Even
> after all this discussion, your pattern matching example is merely
> back to almost the brevity of my Lisp DSL, only with more weird
> punctuation and more dependent on the compiler for optimization. So
> excuse me if I don't exactly feel compelled to run out and implement a
> pattern-matching library.

Just fwiw, I've just found the match.ss library in mzscheme
http://download.plt-scheme.org/doc/370/html/mzlib/mzlib-Z-H-27.html
(I've previously used ssax/sxml-match to good effect for parsing xml
files).  In the few places that I've used it so far, it's really very
neat.  Quite powerful and expressive.  I doubt that I will turn around
now and use it for everything, but it tidied up some messy trees of
destructuring binds for me, beautifully.  I've no doubt that the same sort
of thing could/does exist for CL.

> You seem to fail to understand the point of macros in a very
> fundamental way. They are not intended to express file formats
> external to the program. They are used to express domain-specific
> concepts within the source code of the program. In the context of a
> TeX tool, you would use a lexer/parser to convert TeX source into some
> IR, and you could very well write a typesetting DSL to operate on that
> IR within your program. The tool appropriate for the first use isn't
> necessarily appropriate for the second use.

An existence proof of that sort of thing can be found in tex2page, 
http://download.plt-scheme.org/doc/370/html/tex2page/
which parses TeX files just the way you describe, and then goes on to
render the results in HTML. Converting TeX to intermediate scheme/CL
allows arbitrary (?) TeX macros to be evaluated in the document.  Works
better than other tex -> html converters that I've tried in the past, but
ymmv.

Cheers,

-- 
Andrew
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9vd1d$bpf$3@online.de>
Andrew Reilly schrieb:
> [match.ss] tidied up some messy trees of
> destructuring binds for me, beautifully.

Destructuring binds are *exactly* what pattern matching is good for.

Regards,
Jo
From: Matthias Benkard
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187174659.893157.190810@b79g2000hse.googlegroups.com>
Hi,

> > Lispers aren't going to want an alien infix language in the middle of
> > their Lisp code.
>
> Like regexps?

Regular expressions are actually a very interesting point.  I don't
know if anyone has mentioned this before (I don't have time to skim
through more than a thousand newsgroup posts now :)), but when using
CL-PPCRE, try compiling the following expression:

(lambda (foo bar)
  (ppcre:regex-replace-all "**" foo bar))

So, what happens when you compile this piece of code?  Interestingly,
you get a compile-time error message because of the ill-formed regular
expression!  That's one of the things that compiler macros make
possible (note that REGEX-REPLACE-ALL is a function, not a macro -- in
contrast to macros, compiler macros do not introduce new syntax, but
usually (always?) overlay existing operators).

Compile-time type safety for intentional types provided by third-party
libraries is a kind that I have only seen in Common Lisp so far.
Ironic, isn't it? :)

Can the O'Caml macro processor do that, too?  Has it actually been
used to do something similar?  Would it be more painful to do so than
using compiler macros?  (I have never looked at O'Caml macros, so
these are not rhetorical questions.)

Bye-bye,
Matthias
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c6a7h13p2m86a@corp.supernews.com>
Matthias Benkard wrote:
> Compile-time type safety for intentional types provided by third-party
> libraries is a kind that I have only seen in Common Lisp so far.
> Ironic, isn't it? :)

:-)

> Can the O'Caml macro processor do that, too?  Has it actually been
> used to do something similar?  Would it be more painful to do so than
> using compiler macros?  (I have never looked at O'Caml macros, so
> these are not rhetorical questions.)

Yes, this is the same in OCaml. In fact, Martin Jambon's MicMatch library
extends OCaml's pattern matching to support regular expressions:

  http://martin.jambon.free.fr/micmatch.html

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Russell McManus
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87fy2m2piu.fsf@thelonious.cl-user.org>
Jon Harrop <···@ffconsultancy.com> writes:

> Rayiner Hashem wrote:
>>> Yes, that is true of most term rewriters, of course, and is a subset of
>>> the functionality provided by OCaml's Camlp4 macros or Mathematica.
>> 
>> In what way are Camlp4's macros a superset of Lisp's macros?
>
> OCaml takes Lisp's macros and adds:
>
> 1. Native support for any data structure including lazy sequences and trees,
> not just s-exprs.

Why do you think that Lisp code contains only s-exprs?  This is false.
You can embed any kind of object in lisp source code, and Lisp macros
can operate on any kind of object.

You don't know Lisp, so stop writing about it as if you do.

Some advice: "When you find yourself in a deep hole, the first thing
to do is stop digging".

-russ
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x7inyyrio.fsf@ruckus.brouhaha.com>
Russell McManus <···············@yahoo.com> writes:
> > 1. Native support for any data structure including lazy sequences and trees,
> > not just s-exprs.
> 
> Why do you think that Lisp code contains only s-exprs?  This is false.
> You can embed any kind of object in lisp source code, and Lisp macros
> can operate on any kind of object.
> You don't know Lisp, so stop writing about it as if you do.

How would you write the sequence [1..] in Lisp?  Just wondering.
That's Haskell syntax for the infinite list (1 2 3 4 5 ....) .
From: Matthias Benkard
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187117612.839913.73820@57g2000hsv.googlegroups.com>
Hi,

> How would you write the sequence [1..] in Lisp?  Just wondering.
> That's Haskell syntax for the infinite list (1 2 3 4 5 ....) .

If you used a lazily evaluated Lisp, you could presumably write
something like the following out of the box: #1=(1 . (mapcar #'1+
#1#)), which is essentially the same you'd write in a variant of
Haskell without special syntax for simple infinite lists (let stuff =
1:(map (+1) stuff) in stuff).  If you used such constructs repeatedly,
you would write a macro that made writing them easier.  Maybe even a
reader macro, if trivial lazy lists are worth that much for your
specific application.  (You could even do that in a strictly evaluated
Lisp, but you'd have to define your own lazy-list type and functions
that operated on them.)

Bye-bye,
Matthias
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187112827.702564.14340@l70g2000hse.googlegroups.com>
On Aug 14, 1:06 pm, Paul Rubin <·············@NOSPAM.invalid> wrote:
> Russell McManus <···············@yahoo.com> writes:
> > > 1. Native support for any data structure including lazy sequences and trees,
> > > not just s-exprs.
>
> > Why do you think that Lisp code contains only s-exprs?  This is false.
> > You can embed any kind of object in lisp source code, and Lisp macros
> > can operate on any kind of object.
> > You don't know Lisp, so stop writing about it as if you do.
>
> How would you write the sequence [1..] in Lisp?  Just wondering.
> That's Haskell syntax for the infinite list (1 2 3 4 5 ....) .

If you really want a literal sequence object in the source code, you'd
define a reader macro that parsed whatever syntax you wanted and
returned a sequence object. This is the same mechanism used to support
literal vectors in source code.

Simple experiment:

(defmacro print-types (&rest elements)
  (loop for x in elements do (format t "~A~%" (type-of x))))

(print-types 1 2 three #(1 2 3) #*10010)

On my machine, this prints:

BIT
(INTEGER 0 536870911)
SYMBOL
(SIMPLE-VECTOR 3)
(SIMPLE-BIT-VECTOR 5)

Note that the print-types is a macro invocation, not a function call.
It's not printing the types of the objects instantiated at run-time,
but the types of the objects that are in the source code at compile-
time. This is why it prints "SYMBOL" for "three", instead of
complaining that the variable "three" is undefined.
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <Xtmwi.21504$j7.396026@news.indigo.ie>
Paul Rubin wrote:

> How would you write the sequence [1..] in Lisp?  Just wondering.
> That's Haskell syntax for the infinite list (1 2 3 4 5 ....) .

Well, basic lisp cons lists are strict, you can't use an ordinary cons
list.  But one approach would be to use the "series" package.
http://www.cliki.net/Series
It was covered way back in CLTL2 but didn't make it into ANSI Common
Lisp, unlike e.g. CLOS.

(require :series) 

; set a finite print length, or echoing infinite series back won't end
(setf *print-length* 10) 

; construct a series that counts up from 1
(series:scan-range :from 1)  
-> #Z(1 2 3 4 5 6 7 8 9 10 ...) 

Re macros:

Of course you could pass an infinite series instance to a macro if you
want to. Thoroughly pointless example to illustrate that:

(defmacro foo (a)
           (list 'quote (list 
                (series:collect-sum (series:subseries a 0 10))
                (series:subseries a 10))))

Here, the foo macro expects its argument to be a series, obviously
enough. 

So, if you pass a series to foo (here I just use #. to embed series
instances in the pre-macroexpansion-time source), it will macroexpand
to a list of the sum of the first ten elements of foo and the remainder
of the series.

(list (foo #.(series:scan-range :from 1))  ; 1 2 3 4 ...
      (foo #.(series:scan-range :from 1 :by 2)))  ; 1 3 5 7 ...

->
((55 #Z(11 12 13 14 15 16 17 18 19 20 ...))
 (100 #Z(21 23 25 27 29 31 33 35 37 39 ...)))
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187192748.100490.158440@l22g2000prc.googlegroups.com>
On Aug 13, 11:47 am, Jon Harrop <····@ffconsultancy.com> wrote:
> Andy Freeman wrote:
> > On Aug 11, 12:32 am, Jon Harrop <····@ffconsultancy.com> wrote:
> >> Lisp macros are just a rudimentary form of term rewriter, limited to
> >> s-exprs.
>
> > Lisp macros let one usefully rewrite/generate code.  (The code happens
> > to be expressed as s-expressions, but that's a separable issue.)  Term
> > rewriters only rewrite data.
>
> S-exprs are a subset of terms (code is data).

Code can be data when the appropriate mechanisms are involved.  Lisp
macros are an easy-to-use mechanism.  Other mechanisms for
programatically manipulating code are often less easy to use.

And, interestingly enough, lisp macros are not s-expression rewriters,
even though they do transform s-expressions into other s-expressions.
You'd think that a pedant like Harrop would get that right.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural     language Minim
Date: 
Message-ID: <f9kbqj$bfg$1@online.de>
Pascal Costanza schrieb:
> Joachim Durchholz wrote:
>> WHEN, UNLESS, DOLIST and DOTIMES could all be easily written as 
>> higher-order functions. They'd take functions as parameters for the 
>> various conditions and bodies, and return a function that, when run, 
>> executes the constructed conditional resp. loop.
> 
> This is wrong. WHEN, UNLESS, DOLIST and DOTIMES cannot (!) be 
> implemented as higher-order functions. They don't take functions as 
> parameters, they take code fragments as parameters.

Any code fragment that is an expression also is a function. The open 
variables become the parameters, the expression's result the function's 
result, and (if the language is impure) the expression's side effects 
become the function's side effects.

Technically, there may be differences - but they don't buy any expressivity.

>> To make that smooth, you'd need as little syntactic overhead as 
>> possible. Haskell's syntax is minimal enough for that IMHO.
> 
> Macros are not about making syntax smooth, they are about hiding 
> implementation details. If you implement the functional equivalents of 
> WHEN, UNLESS, DOLIST and DOTIMES, you have to impose the requirement on 
> the call sites that they need to wrap code fragments in closures such 
> that their evaluation is correctly delayed and can be controlled by 
> those functions.

In a pure language, it's irrelevant how much evaluation is delayed.

In an impure language, you indeed need to write it as a closure, but I 
think it's just the syntactic overhead for closures in Lisp that deters 
you from accepting that as a possibility.

> What is good about syntactic abstractions, as provided by macros, is 
> that you can change the implementation underneath without changing the 
> call sites.

That's standard issue for any function, too.

 > For example, you could expand into goto statements instead
> of higher-order functions to control evaluation of code fragments by 
> jumping around them, to avoid the overhead of creating closures 
> completely.

Closures are a run-time overhead in Lisp? Then I understand a bit better 
why macros are important.

 > Such a change of implementation strategy is not possible
> with functional abstractions because you are essentially stuck with 
> higher-order functions and closures, and don't have anything else at 
> your disposal.

Yes, but the compiler can optimize the closures out where that's possible.

>> I'm not sure how much of that argument transfers to more complicated 
>> macros.
> 
> Delaying evaluation of parts of a macro invocation is only one of the 
> many possible uses for macros. With macros, you have complete access to 
> the whole macro invocation and can fully deconstruct all the code 
> fragments in whatever way you like.

How do you make sure that the macro doesn't change the semantics of the 
code submitted to it?

 > Closures, functions, and function
> arguments in lazy languages, are in contrast typically opaque: You 
> cannot modify them, and you typically cannot even inspect them.

Right.

 > So with macros, you have a clear increase in expressive power.

And a definitive decrease of control.
A function may ignore a parameter, but it cannot make the parameter do 
something subtly different than what the caller expected. A macro could.

> This is what makes macros especially suitable for embedding 
> domain-specific languages: You are not restricted by the preferred 
> programming style of the host language at all. In a pure functional 
> language, your domain-specific language will essentially always remain a 
> functional language. In a pure object-oriented language, your 
> domain-specific language will essentially always remain an 
> object-oriented language. And so on. As soon as you have macros, you can 
> always break out of whatever the underlying language favors.

The downside is that you pay a lot of syntactic overhead simply to 
select what kind of style you want.

I'm not sure what to make of the fact that the same multi-paradigm 
argument is being made for C++.
I'm also not sure what to make of the fact that Mozart/Oz is even more 
multi-paradigm than C++ and Lisp combined, but without macros.

Regards,
Jo
From: Matthias Benkard
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186928857.327414.178960@l70g2000hse.googlegroups.com>
> Any code fragment that is an expression also is a function.

Only if your definition of code does not include embedded languages
whose elements don't naturally map to functions.


> How do you make sure that the macro doesn't change the semantics of the
> code submitted to it?

How do you make sure that the function doesn't do something completely
different than the closure submitted to it?


>  > So with macros, you have a clear increase in expressive power.
>
> And a definitive decrease of control.
> A function may ignore a parameter, but it cannot make the parameter do
> something subtly different than what the caller expected. A macro could.

A function could ignore the closure altogether.  It could do something
completely different.

Both macros and functions establish contracts.  Whether they break
these contracts doesn't at all depend on whether they are macros or
functions.  Of course, you can mathematically prove that a function
breaks its contract or not -- but you can do that for macros just as
easily.


> The downside is that you pay a lot of syntactic overhead simply to
> select what kind of style you want.

Syntactic overhead?  As in Lisp, an essentially syntax-free language?
Come on.


> I'm not sure what to make of the fact that the same multi-paradigm
> argument is being made for C++.

I'm not sure what to make of the fact that the same static typing
argument is being made for BASIC.

Umm...  What was it we were talking about again?


> I'm also not sure what to make of the fact that Mozart/Oz is even more
> multi-paradigm than C++ and Lisp combined, but without macros.

It is?

~ Matthias
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13buh1oi404aq65@corp.supernews.com>
Matthias Benkard wrote:
> Both macros and functions establish contracts.  Whether they break
> these contracts doesn't at all depend on whether they are macros or
> functions.  Of course, you can mathematically prove that a function
> breaks its contract or not -- but you can do that for macros just as
> easily.

Type safe macros are non-trivial. Haskell has them IIRC.

>> The downside is that you pay a lot of syntactic overhead simply to
>> select what kind of style you want.
> 
> Syntactic overhead?  As in Lisp, an essentially syntax-free language?
> Come on.

I believe "syntactic overhead" means verbosity in this context.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9rnif$988$1@online.de>
Matthias Benkard schrieb:
>> Any code fragment that is an expression also is a function.
> 
> Only if your definition of code does not include embedded languages
> whose elements don't naturally map to functions.

I'm having trouble imagining such a thing.

Care to give an example?

>> How do you make sure that the macro doesn't change the semantics of
>> the code submitted to it?
> 
> How do you make sure that the function doesn't do something completely
> different than the closure submitted to it?

The HOF can either use the expression or it can ignore it.

That's a lot less degrees of freedom than what macros can do.

> Both macros and functions establish contracts.  Whether they break
> these contracts doesn't at all depend on whether they are macros or
> functions.

Yes, but macros can break the semantics of the functions that are 
submitted to them. A HOF can't do that.

 > Of course, you can mathematically prove that a function
> breaks its contract or not -- but you can do that for macros just as
> easily.

No, it's more difficult for macros because you have to check more 
possibilities.
Except, of course, for those macros that don't rewrite the code 
submitted to them, but these are exactly those macros that can be 
rewritten as HOFs.

>> The downside is that you pay a lot of syntactic overhead simply to
>> select what kind of style you want.
> 
> Syntactic overhead?  As in Lisp, an essentially syntax-free language?
> Come on.

The syntactic overhead is in the keywords. Or macro names. Or whatever.

It's a simple Huffman argument: the more choices you have to encode, the 
more symbols you need for the encoding.

>> I'm not sure what to make of the fact that the same multi-paradigm
>> argument is being made for C++.
> 
> I'm not sure what to make of the fact that the same static typing
> argument is being made for BASIC.

Basic doesn't have user-defined types (except some mutated versions). Or 
HM type inference.

>> I'm also not sure what to make of the fact that Mozart/Oz is even more
>> multi-paradigm than C++ and Lisp combined, but without macros.
> 
> It is?

Yes. It goes the entire hierarchy from restricted with many guarantees 
to unrestricted with few guarantees:
Linear programming, functional programming, imperative programming, OO 
programming / concurrent programming.

In Lisp or C++ (or any FPL other than Mozart/Oz), you'd have to emulate 
linear programming by writing a library.

Regards,
Jo
From: Matthias Benkard
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187176443.265433.278780@57g2000hsv.googlegroups.com>
Hi,

Joachim Durchholz schrieb:
> >> Any code fragment that is an expression also is a function.
> >
> > Only if your definition of code does not include embedded languages
> > whose elements don't naturally map to functions.
>
> I'm having trouble imagining such a thing.
>
> Care to give an example?

I don't know.  The standard Common Lisp LOOP macro, maybe?


> > Both macros and functions establish contracts.  Whether they break
> > these contracts doesn't at all depend on whether they are macros or
> > functions.
>
> Yes, but macros can break the semantics of the functions that are
> submitted to them. A HOF can't do that.

I don't see the difference.  Either a macro expansion does what I
expect it to, or it doesn't.  It's the same with a function call --
except that functions can't modify the lexical environment of the
caller, but that seems like a minor point to me, because:

(a) in a purely functional language, macros can't do that anyway
because lexical environments are static; and

(b) in all other languages, functions can change the dynamic
environment, which can be just as disastrous (deleting files, killing
threads, changing global variables, ...).


>  > Of course, you can mathematically prove that a function
> > breaks its contract or not -- but you can do that for macros just as
> > easily.
>
> No, it's more difficult for macros because you have to check more
> possibilities.

It can't be generally more difficult, because macros _are_ functions.


> The syntactic overhead is in the keywords. Or macro names. Or whatever.
>
> It's a simple Huffman argument: the more choices you have to encode, the
> more symbols you need for the encoding.

Yeah, well...  By default, yes.  You can change that, though.  You can
still define custom reader syntax if your application (or a part of
it) consists solely of lambda expressions or currying function
applications, but the question is, is it worth the loss in macro
transparency?  Is it worth the cost in editing convenience?  That
probably depends on the application.  Lisp gives you the freedom to
decide for yourself depending on the situation.  Other languages
don't.


> Basic doesn't have user-defined types (except some mutated versions). Or
> HM type inference.

And C++ doesn't have simple syntax, S-expression-powered macros and
safe dynamic typing. :)

Bye-bye,
Matthias
From: Dan Bensen
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9uqmg$lm7$1@wildfire.prairienet.org>
 >>> Both macros and functions establish contracts.  Whether they break
 >>> these contracts doesn't at all depend on whether they are macros or
 >>> functions.

 > Joachim Durchholz schrieb:
 >> Yes, but macros can break the semantics of the functions that are
 >> submitted to them. A HOF can't do that.

Matthias Benkard wrote:
 > I don't see the difference.

A macro treats any code passed to it as a data structure, so there's
no built-in semantic relationship between the macro arguments and the
expanded code.  A HOF, on the other hand, can only call or pass its
functions, which will do whatever they do no matter what.  The only
thing the HOF controls is how many times the functions are called,
with what arguments, and other external issues.

 > Either a macro expansion does what I expect it to, or it doesn't.

But the macro has more freedom to do things that the client programmer
might not expect.  It's not guaranteed that any code passed to a macro
will be expanded to do what it seems to do.

 >>> Of course, you can mathematically prove that a function
 >>> breaks its contract or not -- but you can do that for macros
 >>> just as easily.

 >> No, it's more difficult for macros because you have to check more
 >> possibilities.

 > It can't be generally more difficult, because macros _are_ functions.

But it is more difficult in practice, because macros often receive
huge nested list structures, i.e. code, that they can muck with, whereas
HOFs receive only black boxes.

-- 
Dan
www.prairienet.org/~dsb/
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c69qckeb9p66@corp.supernews.com>
Dan Bensen wrote:
> A macro treats any code passed to it as a data structure, so there's
> no built-in semantic relationship between the macro arguments and the
> expanded code.

Some macro systems provide constraints, such as static typing in Haskell's
macro system. As I understand it, that is very difficult to do and I've no
idea how restrictive it is. OCaml does the same as Lisp and lets you
generate any old junk that the compiler will barf at.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9vde4$dc6$1@online.de>
Matthias Benkard schrieb:
> Hi,
> 
> Joachim Durchholz schrieb:
>>>> Any code fragment that is an expression also is a function.
>>> Only if your definition of code does not include embedded languages
>>> whose elements don't naturally map to functions.
>> I'm having trouble imagining such a thing.
>>
>> Care to give an example?
> 
> I don't know.  The standard Common Lisp LOOP macro, maybe?

Hey, if you don't have an example of stuff that doesn't map nicely to 
functions, where's your argument?

And, no, LOOP doesn't cut it. Easily mapped to a higher-order function. 
Even in a pure language, though you'd need some monad infrastructure to 
get the side effects mapped (LOOP doesn't make sense without side effects).

>>> Both macros and functions establish contracts.  Whether they break
>>> these contracts doesn't at all depend on whether they are macros or
>>> functions.
>> Yes, but macros can break the semantics of the functions that are
>> submitted to them. A HOF can't do that.
> 
> I don't see the difference.  Either a macro expansion does what I
> expect it to, or it doesn't.  It's the same with a function call --
> except that functions can't modify the lexical environment of the
> caller, but that seems like a minor point to me, because:
> 
> (a) in a purely functional language, macros can't do that anyway
> because lexical environments are static; and
> 
> (b) in all other languages, functions can change the dynamic
> environment, which can be just as disastrous (deleting files, killing
> threads, changing global variables, ...).

Right, but the ability to modify the submitted function is just an 
additional source of potential bugs.

(The catastrophes mentioned above are the reason why I'm more and more 
into pure languages.)

>>  > Of course, you can mathematically prove that a function
>>> breaks its contract or not -- but you can do that for macros just as
>>> easily.
>> No, it's more difficult for macros because you have to check more
>> possibilities.
> 
> It can't be generally more difficult, because macros _are_ functions.

Turing equivalence is irrelevant to ergonomic issues.
And guarantees *are* an ergonomic issue.

>> The syntactic overhead is in the keywords. Or macro names. Or whatever.
>>
>> It's a simple Huffman argument: the more choices you have to encode, the
>> more symbols you need for the encoding.
> 
> Yeah, well...  By default, yes.  You can change that, though.  You can
> still define custom reader syntax if your application (or a part of
> it) consists solely of lambda expressions or currying function
> applications,

That would just be choosing shorter symbols for specific uses.
Huffman again.

 > but the question is, is it worth the loss in macro
> transparency?  Is it worth the cost in editing convenience?  That
> probably depends on the application.  Lisp gives you the freedom to
> decide for yourself depending on the situation.  Other languages
> don't.

If you don't need macros, you don't need to encode the decision whether 
it's a macro or a function. One bit less to encode.

I concede that a single bit isn't too much of a problem, but I think 
Lisp has far too many mechanisms in general.

Regards,
Jo
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187202852.447420.303050@d55g2000hsg.googlegroups.com>
> And, no, LOOP doesn't cut it. Easily mapped to a higher-order function.

Doable, but not necessarily "easy". Loop macros generally work by
splicing bits of their arguments into different places in a canonical
loop. Obviously you can accomplish the same thing with HOFs, but
achieving the same direct clarity, compactness, and reusability is
difficult with HOFs. Shivers's "Anatomy of a Loop" is illuminating:

http://www.cc.gatech.edu/~shivers/papers/loop.pdf
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9vk0d$pmc$1@online.de>
Rayiner Hashem schrieb:
>> And, no, LOOP doesn't cut it. Easily mapped to a higher-order function.

 > Shivers's "Anatomy of a Loop" is illuminating:
 >
 > http://www.cc.gatech.edu/~shivers/papers/loop.pdf

Saying that tail calls are a "goto that passes arguments" is such a 
nonsensical position that I refuse to waste my time on it. Even if that 
idea originated with Steele (even great people can make mistakes).

E.g. fencepost errors are far easier with a loop construct than with a 
tail call, and his argument doesn't account for that effect, so it must 
be wrong. (Don't ask me where exactly it falls down though, I've got no 
time to analyze this in depth right now.)

> Doable, but not necessarily "easy". Loop macros generally work by
> splicing bits of their arguments into different places in a canonical
> loop. Obviously you can accomplish the same thing with HOFs, but
> achieving the same direct clarity, compactness, and reusability is
> difficult with HOFs.

I'm not sure where the problems come from. The basic While loop is 
really easy to emulate with HOFs, and there isn't much interest in more 
intricate loop constructs in the FPL community. (I do have some ideas 
why that might be, but phrasing this well enough is beyond my time 
budget. Sorry for that, but I've got a project in crunch mode at hand, I 
can currently spare only my wait times and nothing more...)

Regards,
Jo
From: Scott Burson
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187207400.495197.107990@q3g2000prf.googlegroups.com>
On Aug 15, 12:26 pm, Joachim Durchholz <····@durchholz.org> wrote:
> Saying that tail calls are a "goto that passes arguments" is such a
> nonsensical position that I refuse to waste my time on it. Even if that
> idea originated with Steele (even great people can make mistakes).

Steele didn't say that a TAIL call is a goto that passes arguments, he
said ANY call is a goto that passes arguments, one of which is the
implicit continuation.

A tail call, then, is simply a call that reuses a previously supplied
continuation, rather than constructing a new one.

-- Scott
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187208833.414000.89540@57g2000hsv.googlegroups.com>
> Saying that tail calls are a "goto that passes arguments" is such a
> nonsensical position that I refuse to waste my time on it. Even if that
> idea originated with Steele (even great people can make mistakes).

It's quite sensical at multiple levels. It becomes really obvious when
you look at the conversion of traditional imperative constructs to
functional constructs in the context of SSA. See Appel's paper: "SSA
is Functional Programming" --- he goes through a very neat example
demonstrating the equivalence of goto'ing to a block with a phi node
and calling a lambda with arguments.

> E.g. fencepost errors are far easier with a loop construct than with a
> tail call:

Fencepost errors are the result of looping with integer index
variables, not a problem with looping in general. You can hit a
fencepost error just as easily with a tail call with an integer
termination condition.

> I'm not sure where the problems come from. The basic While loop is
> really easy to emulate with HOFs, and there isn't much interest in more
> intricate loop constructs in the FPL community.

The basic while-loop is like 0.1% of the power of a good looping
construct. Things get interesting when you're iterating over multiple
data structures, advancing the iteration irregularly in each one, all
while trying to avoid consing too much in an inner loop.
From: Rob Warnock
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <Ld2dnVyIx4N5mlnbnZ2dnUVZ_r7inZ2d@speakeasy.net>
Rayiner Hashem  <·······@gmail.com> wrote:
+---------------
| The basic while-loop is like 0.1% of the power of a good looping
| construct. Things get interesting when you're iterating over multiple
| data structures, advancing the iteration irregularly in each one, all
| while trying to avoid consing too much in an inner loop.
+---------------

Exactly what I was trying to illustrate in my "WIRLEX" example
a little while back [in the "Java discovers map" thread]...


-Rob

-----
Rob Warnock			<····@rpw3.org>
627 26th Avenue			<URL:http://rpw3.org/>
San Mateo, CA 94403		(650)572-2607
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <fa1t9t$d7g$1@online.de>
Rayiner Hashem schrieb:
>> Saying that tail calls are a "goto that passes arguments" is such a
>> nonsensical position that I refuse to waste my time on it. Even if that
>> idea originated with Steele (even great people can make mistakes).
> 
> It's quite sensical at multiple levels. It becomes really obvious when
> you look at the conversion of traditional imperative constructs to
> functional constructs in the context of SSA. See Appel's paper: "SSA
> is Functional Programming" --- he goes through a very neat example
> demonstrating the equivalence of goto'ing to a block with a phi node
> and calling a lambda with arguments.

The quote may have been too far out of context to properly represent 
Steele's original argument, of course. However, at least in the paper, 
that quote was used along the lines of "tail calls are as bad as gotos", 
and that's definitely nonsense.

>> E.g. fencepost errors are far easier with a loop construct than with a
>> tail call:
> 
> Fencepost errors are the result of looping with integer index
> variables, not a problem with looping in general. You can hit a
> fencepost error just as easily with a tail call with an integer
> termination condition.

Um... no, not really. For a function, preconditions and postconditions 
are far more explicit than for a loop body, so it's more difficult to 
write code that's prone to fencepost error.
Could be related to the question where exactly in the loop the loop 
invariant holds: at the start, somewhere in the middle, immediately 
before the various exit points. For a function, it's more "natural" to 
associate loop invariants in the form of preconditions.
(Not 100% sure how relevant or accurate that is.)

>> I'm not sure where the problems come from. The basic While loop is
>> really easy to emulate with HOFs, and there isn't much interest in more
>> intricate loop constructs in the FPL community.
> 
> The basic while-loop is like 0.1% of the power of a good looping
> construct. Things get interesting when you're iterating over multiple
> data structures, advancing the iteration irregularly in each one, all
> while trying to avoid consing too much in an inner loop.

Ah, OK.

In a strict language, I'd construct a stream for each data structure I'm 
iterating over, and use a function to consume them all (tail calling 
into whatever step is "the next necessary one"). Should be 
straightforward to program.
In a non-strict language, I'd use lazy lists instead of streams (in a 
non-strict language, a list *is* a stream). The list nodes will never be 
constructed if the compiler does the deforestation optimization, so the 
generated code will do the same as in the strict case.

So while I agree that a good looping construct goes far beyond a simple 
While loop, I'd say that functions already cover even the more complex 
loop constructs quite well.
Of course, not having seen the full power of the most general loop 
construct, I might not be seeing cases where even functions aren't 
enough (though I doubt that).

Regards,
Jo
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-12B3C8.18461516082007@news-europe.giganews.com>
In article <············@online.de>,
 Joachim Durchholz <··@durchholz.org> wrote:

...

> So while I agree that a good looping construct goes far beyond a simple 
> While loop, I'd say that functions already cover even the more complex 
> loop constructs quite well.

That's not my experience. Sometimes the code gets clearer
with a dedicated looping construct.

Note, I'm not in the 'let's all reduce to functions' camp.
I use different hammers for different nails and, sometimes,
screwdrivers. ;-)

> Of course, not having seen the full power of the most general loop 
> construct, I might not be seeing cases where even functions aren't 
> enough (though I doubt that).
> 
> Regards,
> Jo

Common Lisp provides you with a LOOP macro. Some implementations
of LOOP are also user-extensible.

Alternatives are SERIES, which provides the programmer
with stream like iteration, where the iteration constructs
are at compile time reduced to efficient code (via macros).

Another example for a nice general purpose LOOPing construct
is ITERATE.
http://common-lisp.net/project/iterate/
http://common-lisp.net/project/iterate/doc/index.html

-- 
http://lispm.dyndns.org
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187291560.513149.71530@k79g2000hse.googlegroups.com>
> The quote may have been too far out of context to properly represent
> Steele's original argument, of course. However, at least in the paper,
> that quote was used along the lines of "tail calls are as bad as gotos",
> and that's definitely nonsense.

I don't think the intention of the comparison is to say "tail-calls
considered harmful". Rather, it's to say that tail-calls are a
relatively low-level mechanism for enabling iteration, but that level
is not the most appropriate level at which to express a complex
iteration.

> Could be related to the question where exactly in the loop the loop
> invariant holds: at the start, somewhere in the middle, immediately
> before the various exit points.

Shivers's discussion of the relationship of dominance and scope is
directly related to what you're getting at. In a pure, lexically-
scoped language, any invariants that hold at the point where a name is
bound will hold at any use within the syntactic body of the binding
construct. This is directly related to the idea of dominance in
imperative languages --- any invariant that holds at a definition will
hold at any uses dominated by that definition. I think the problem you
see with traditional looping constructs is the result of a mismatch
between dominance and scoping in the syntax of these constructs.
Specifically:

x = 0
while(x < 10) {
  ...
  x++
  ...
}

In this example, the syntactic body of the loop is not dominated by
any of the apparent definitions of "x". The invariant "x < 10" thus
does not necessarily hold at any point within the body of the loop. In
contrast, consider how this loop would be expressed in a Lisp
iteration package:

(iter (for i from 0 to 10)
  ...)

The definition of "i" dominates every point within the body of the
loop. Any invariant that holds at the definition, in this case the
fact that 0 <= i < 10, will hold anywhere in the loop-body.

> In a strict language, I'd construct a stream for each data structure I'm
> iterating over, and use a function to consume them all (tail calling
> into whatever step is "the next necessary one"). Should be
> straightforward to program.

The question is one of composability, as Shivers points out. It's easy
to write a data structure and constructor for any particular loop you
want to express, but how do you break the various loop concepts into
orthogonal pieces so they can be easily composed to express lots of
different kinds of loops? How do you then make the whole thing easily
extendible by the user? Just as important, how do you do all this
without creating lots of temporary data structures within the loop?

Consider something like: http://common-lisp.net/project/iterate/doc/index.html

I have a hard time imagining how to build a scalable, syntactically
pleasing version of something like this using just HOFs.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <fa2fhh$e7j$1@online.de>
Rayiner Hashem schrieb:
>> The quote may have been too far out of context to properly represent
>> Steele's original argument, of course. However, at least in the paper,
>> that quote was used along the lines of "tail calls are as bad as gotos",
>> and that's definitely nonsense.
> 
> I don't think the intention of the comparison is to say "tail-calls
> considered harmful". Rather, it's to say that tail-calls are a
> relatively low-level mechanism for enabling iteration, but that level
> is not the most appropriate level at which to express a complex
> iteration.

Well, OK, I could agree with that. Whether a call is a tail call or not 
is not usually a consideration when you're trying to get an iteration right.

>> Could be related to the question where exactly in the loop the loop
>> invariant holds: at the start, somewhere in the middle, immediately
>> before the various exit points.
> 
> Shivers's discussion of the relationship of dominance and scope is
> directly related to what you're getting at. In a pure, lexically-
> scoped language, any invariants that hold at the point where a name is
> bound will hold at any use within the syntactic body of the binding
> construct. This is directly related to the idea of dominance in
> imperative languages --- any invariant that holds at a definition will
> hold at any uses dominated by that definition. I think the problem you
> see with traditional looping constructs is the result of a mismatch
> between dominance and scoping in the syntax of these constructs.

Right.

Though that's just one aspect. Multiple exits can cause trouble, too 
(moving code across an exit point becomes a somewhat more complicated 
operation, because that has ramifications for the code that follows the 
loop).

>> In a strict language, I'd construct a stream for each data structure I'm
>> iterating over, and use a function to consume them all (tail calling
>> into whatever step is "the next necessary one"). Should be
>> straightforward to program.
> 
> The question is one of composability, as Shivers points out. It's easy
> to write a data structure and constructor for any particular loop you
> want to express, but how do you break the various loop concepts into
> orthogonal pieces so they can be easily composed to express lots of
> different kinds of loops? How do you then make the whole thing easily
> extendible by the user? Just as important, how do you do all this
> without creating lots of temporary data structures within the loop?

Composability is mostly affected by the effects of updates.
If you're doing HOFs, you're usually not using updates anyway (since 
HOFs make it more difficult to track when exactly an update with 
happen). So with a mostly-pure style of programming, composability is 
not an issue.
(Might be another reason why looping constructs don't meet interest in 
non-Lisp FPLs, or perhaps FPLs that don't use macros.)

> Consider something like: http://common-lisp.net/project/iterate/doc/index.html
> 
> I have a hard time imagining how to build a scalable, syntactically
> pleasing version of something like this using just HOFs.

I have a hard time imagining what this kind of stuff would be good for 
when your idiomatic code is mostly HOFs.
(I have to admit I haven't looked too deeply into it; lack of time right 
now. I'll take a deeper look tomorrow if I find the time.)

Regards,
Jo
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187301693.598483.9480@d55g2000hsg.googlegroups.com>
> Though that's just one aspect. Multiple exits can cause trouble, too
> (moving code across an exit point becomes a somewhat more complicated
> operation, because that has ramifications for the code that follows the
> loop).

When you need to exit a loop, you need to exit the loop. Expressing an
early exit or skipped iteration is relatively easy and clean using an
iteration construct. Now, you can sometimes avoid an early exit by
changing the data structure you're iterating over, but it's hard to do
this without consing a lot.

> Composability is mostly affected by the effects of updates.

Not really. I'm more talking about the composability of iteration
drivers and accumulators. Traditional interfaces like map, reduce,
fold, etc don't easily scale in that direction.
From: Neelakantan Krishnaswami
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <slrnfcap1n.5gi.neelk@gs3106.sp.cs.cmu.edu>
In article <<······················@d55g2000hsg.googlegroups.com>>,
Rayiner Hashem <·······@gmail.com> wrote:
> 
> Not really. I'm more talking about the composability of iteration
> drivers and accumulators. Traditional interfaces like map, reduce,
> fold, etc don't easily scale in that direction.

I took a look at the iterate manual, and think it's pretty nice.
Reduce and fold obviously can't do what it can, because they
correspond to the accumulators, and not the drivers, of iterate.

You need unfolds to model the drivers. Here's the definition of 
unfold:

  unfold :: (b -> Maybe (a, b)) -> b -> [a]
  unfold f seed = 
    case f seed of
      Nothing -> []
      Just(x, seed) -> x : (unfold f seed)

Then, you can model parallel iteration with a zip:

  zip :: [a] -> [b] -> [(a, b)]
  zip xs ys = 
    unfold merge (xs, ys)
    where merge ([], _)          = Nothing
          merge (_, [])          = Nothing
          merge (x : xs, y : ys) = Just((x, y), (xs, ys))

So the product of the sums of the elements of two lists could be
written using iterate as:

  (iter (x in xs)
        (y in ys)
        (if (= (+ x y) 0) (leave 0))
        (multiply (+ x y) into z)
        (finally z))

Using a fold and a zip, you'd write:

    fold prodsum 1 (zip xs ys)
      where prodsum (x, y) acc | x + y == 0 = 0
                               | otherwise  = acc * (x + y)

This will terminate early, since these are all lazy streams. You can
also avoid consing intermediate lists by defining fusion rules for the
compiler. (E.g, map f (map g list) ==> map (f . g) list)

-- 
Neel R. Krishnaswami
·····@cs.cmu.edu
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13calpek390ctb2@corp.supernews.com>
Rayiner Hashem wrote:
> The question is one of composability, as Shivers points out. It's easy
> to write a data structure and constructor for any particular loop you
> want to express, but how do you break the various loop concepts into
> orthogonal pieces so they can be easily composed to express lots of
> different kinds of loops? How do you then make the whole thing easily
> extendible by the user? Just as important, how do you do all this
> without creating lot's of temporary data structures within the loop?

That problem is best solved by writing in a declarative style using purely
functional constructs and forgetting about loops altogether.

> Consider something like:
> http://common-lisp.net/project/iterate/doc/index.html
> 
I have a hard time imagining how to build a scalable, syntactically
> pleasing version of something like this using just HOFs.

Iteration isn't an issue in a declarative style. Look at the Burrows-Wheeler
example recently brought up on the Haskell mailing list, for example. All
of these problems just disappear.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Scott Burson
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187293740.001419.57460@x35g2000prf.googlegroups.com>
On Aug 16, 9:17 am, Joachim Durchholz <····@durchholz.org> wrote:
> The quote may have been too far out of context to properly represent
> Steele's original argument, of course. However, at least in the paper,
> that quote was used along the lines of "tail calls are as bad as gotos",
> and that's definitely nonsense.

It is nonsense, that's true -- but it's also nonsense to attribute it
to Steele, who never said anything of the kind.  It's not just out of
context; it's flat wrong.

(The antecedent of your phrase "the paper" is unclear.  I'm assuming
you mean one of Steele's.  If I'm mistaken, please clarify.)

-- Scott
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <fa2fl8$e7j$2@online.de>
Scott Burson schrieb:
> On Aug 16, 9:17 am, Joachim Durchholz <····@durchholz.org> wrote:
>> The quote may have been too far out of context to properly represent
>> Steele's original argument, of course. However, at least in the paper,
>> that quote was used along the lines of "tail calls are as bad as gotos",
>> and that's definitely nonsense.
> 
> It is nonsense, that's true -- but it's also nonsense to attribute it
> to Steele, who never said anything of the kind.  It's not just out of
> context; it's flat wrong.
> 
> (The antecedent of your phrase "the paper" is unclear.  I'm assuming
> you mean one of Steele's.  If I'm mistaken, please clarify.)

No, one of the previous posts mentioned a paper, which in turn had that 
Steele quote.

So yes, there is probably a Steele paper that somewhere, in some way, 
relates tail calls and gotos. Look back a few messages to find the paper.

Regards,
Jo
From: Scott Burson
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187304668.131514.253420@i13g2000prf.googlegroups.com>
On Aug 16, 2:30 pm, Joachim Durchholz <····@durchholz.org> wrote:
> Scott Burson schrieb:
>
> > On Aug 16, 9:17 am, Joachim Durchholz <····@durchholz.org> wrote:
> >> The quote may have been too far out of context to properly represent
> >> Steele's original argument, of course. However, at least in the paper,
> >> that quote was used along the lines of "tail calls are as bad as gotos",
> >> and that's definitely nonsense.
>
> [O]ne of the previous post mentioned a paper, which in turn had that
> Steele quote.
>
> So yes, there is probably a Steele paper that somewhere, in some way,
> relates tail calls and gotos.

Of course there are Steele (and Sussman) papers relating tail calls
and gotos; I read them long ago.  What there is not is a Steele paper
making the value judgment you impute to him, that tail calls are bad
because they're like gotos.  That was NOT the point he was making.  If
the paper to which you are referring says otherwise, it's wrong.

I'm not going to dig back through this monstrous thread to find the
reference.  (Well, maybe I will later, if curiosity overtakes me, but
not now.)  I'm happy to believe this is a simple misunderstanding
somewhere along the way, but I want to get it cleared up so you don't
keep thinking that Steele said such a ridiculous thing.  (And it
really doesn't make any sense at all.  Scheme was invented to make
tail calls "work"!!)

-- Scott
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187305431.288992.226150@19g2000hsx.googlegroups.com>
> Of course there are Steele (and Sussman) papers relating tail calls
> and gotos; I read them long ago.  What there is not is a Steele paper
> making the value judgment you impute to him, that tail calls are bad
> because they're like gotos.  That was NOT the point he was making.  If
> the paper to which you are referring says otherwise, it's wrong.

The paper in question is: http://www.cc.gatech.edu/~shivers/papers/loop.pdf

There is also a YouTube video:

http://video.google.com/videoplay?docid=-3704713569771882785
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xhcmz9iki.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
> The paper in question is: http://www.cc.gatech.edu/~shivers/papers/loop.pdf

It looks to me like Haskell list comprehension syntax takes care of
most of the examples I've seen in those loop macro rants.  I always
though that CL loop was an abomination.  That iterate thing mentioned
earlier looks better, but I'm still not sure what to conclude other
than that Lisp syntax is near-unusable without all those band-aids.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187308000.299094.206410@22g2000hsm.googlegroups.com>
 > It looks to me like Haskell list comprehension syntax takes care of
> most of the examples I've seen in those loop macro rants.  

List comprehensions address a very small portion of the design space
of iteration structures.

> I always
> though that CL loop was an abomination.  That iterate thing mentioned
> earlier looks better, but I'm still not sure what to conclude other
> than that Lisp syntax is near-unusable without all those band-aids.

Far from being band-aids, "iterate" is a prime example of good
software engineering. It's the micro-kernel principle --- there is no
need to do something in the "kernel" that can just as easily be done
in "userspace". Most higher-level syntactic structures can easily be
implemented as macros, so why not do so? It simplifies the core of the
compiler, while giving the user the freedom to swap stuff out to suit
his needs.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xhcmzf2e7.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
>  > It looks to me like Haskell list comprehension syntax takes care of
> > most of the examples I've seen in those loop macro rants.  
> 
> List comprehensions address a very small portion of the design space
> of iteration structures.

I only looked at the examples in those papers.  List comprehensions
(or a monad map in the case of the tree traversal) appeared to handle
those examples ok.  If you have some other examples to post, that
would be nice.  I've never felt like I needed anything like LOOP even
when using Lisp.

> Far from being band-aids, "iterate" is a prime example of good
> software engineering. It's the micro-kernel principle --- there is no
> need to do something in the "kernel" that can just as easily be done
> in "userspace". Most higher-level syntactic structures can easily be
> implemented as macros, so why not do so? It simplifies the core of the
> compiler, while giving the user the freedom to swap stuff out to suit
> his needs.

Moving features from the kernel to userspace is fine if the features
are actually needed.  If they're not needed, it's better to eliminate
them than to move them.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187310609.472527.248780@w3g2000hsg.googlegroups.com>
> Moving features from the kernel to userspace is fine if the features
> are actually needed.  If they're not needed, it's better to eliminate
> them than to move them.

I was referring to the "band-aid" jab. "Band-aid" implies hack. There
is nothing hackish about loop or iterate (or defun or defclass or or
cond or case or all other syntactic forms built with macros). They use
the macro system for the exact purposes for which it is intended.

As for "actually needed", I don't buy it. Not even in the pure-
functional case, much less in general code involving mutation.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x4piz570s.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
> As for "actually needed", I don't buy it. Not even in the pure-
> functional case, much less in general code involving mutation.

I'm still waiting for examples.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187311866.782318.12580@a39g2000hsc.googlegroups.com>
On Aug 16, 8:39 pm, Paul Rubin <·············@NOSPAM.invalid> wrote:
> Rayiner Hashem <·······@gmail.com> writes:
> > As for "actually needed", I don't buy it. Not even in the pure-
> > functional case, much less in general code involving mutation.
>
> I'm still waiting for examples.

No thanks. I'm not going to spend another 50 posts being told "well,
list comprehensions can't do that, but that's because you shouldn't be
doing that anyway!" (1)

Watch the video I linked to. Olin Shivers is a smart guy. I'm sure
he's aware of list comprehensions. If he thought it was a complete
solution, he would've written a list comprehensions macro for scheme,
not a looping package... (2)

(1) Which is how we got into this sub-thread to begin with ---
somebody said "general loop packages are hard to do with HOFs", and
got back "well, you really don't need composeable looping!"

(2) There's even some bits in there for the type-theory folks...
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xir7ft0tu.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
> Watch the video I linked to. Olin Shivers is a smart guy. I'm sure
> he's aware of list comprehensions. 

He mentions them briefly in section 13 of the paper but handwaves
about a supposed difficulty of embedding SQL notation in them.  I
don't understand what issue he's referring to there.  I'd expect an
SQL query result to just be another IO array to iterate through.

He does explain some problems with CL's loop macro in section 2.

> If he thought it was a complete solution, he would've written a list
> comprehensions macro for scheme, not a looping package... (2)

Maybe he'd have to redesign too many of Scheme's underlying
conventions to make that fly.

> (2) There's even some bits in there for the type-theory folks...

I can't tell quite what's going on in those sections (9 and 10) but
it's interesting that he apparently brings a type-inferred
sub-language into Scheme to express these loops.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187314709.268186.325810@d55g2000hsg.googlegroups.com>
> Maybe he'd have to redesign too many of Scheme's underlying
> conventions to make that fly.

What conventions are there to redesign in Scheme? This is complete
speculation.

> I can't tell quite what's going on in those sections (9 and 10) but
> it's interesting that he apparently brings a type-inferred
> sub-language into Scheme to express these loops.

It's alluded to in the presentation. He uses a type system that
expresses the variables which dominate or partially dominate a program
point. He uses type inference on this language* to compute the dominance
information he needs to implement his scoping rule. The -> Scheme
translator uses these type annotations to implement the derived
scoping arrangement in scheme code.

*) It's a neat example of the relationship between flow-analysis and
type inference
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xeji252p7.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
> > Maybe he'd have to redesign too many of Scheme's underlying
> > conventions to make that fly.
> 
> What conventions are there to redesign in Scheme? This is complete
> speculation.

One obvious one is that all container objects should expose a lazy
iterator function, like Python's __iter__ methods or Haskell's fmap
operation on monads.

> *) It's a neat example of the relationship between flow-analysis and
> type inference

Right.  One of the things that got me interested in Haskell is how it
uses type inference to enforce flow constraints.  For example, the STM
monad lets you operate on transactional variables inside an atomic
section, and the type system prevents those variables from leaking
outside the section.
From: Scott Burson
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187307962.358635.54040@m37g2000prh.googlegroups.com>
On Aug 16, 4:03 pm, Rayiner Hashem <·······@gmail.com> wrote:
> > Of course there are Steele (and Sussman) papers relating tail calls
> > and gotos; I read them long ago.  What there is not is a Steele paper
> > making the value judgment you impute to him, that tail calls are bad
> > because they're like gotos.  That was NOT the point he was making.  If
> > the paper to which you are referring says otherwise, it's wrong.
>
> The paper in question is:http://www.cc.gatech.edu/~shivers/papers/loop.pdf

Ah, thanks for the link.  I find what appears to be the relevant text
on the first page:

[[As was popularised by Steele [14, 15, 16, 17], a tail call is
essentially a "goto that passes arguments." So writing loops with tail
calls is just writing them with gotos. Yet, it has long been accepted
in the programming-language community that goto is a low-level and
obfuscatory control operator, a position stated by Dijkstra's "Goto
considered harmful" letter [5].]]

This text clearly does not claim that Steele thinks that tail-calls
are bad.  It doesn't even say that Dijkstra thought so, though I
suppose he might have.  It merely juxtaposes Steele's analysis with
Dijkstra's value judgment to suggest such a conclusion to the reader.

The paper doesn't go on to argue, though, that all tail calls are bad
(what would that mean? there's certainly no way to eliminate them).
What Shivers does argue here is that writing _loops_ in tail-recursive
form is often stylistically problematic.  Well, not to get into that
discussion, but it's clear that the one with the value judgment here
is Shivers, not Steele; and I think it's also pretty clear that
Joachim didn't recount Shivers' opinion very precisely.

(For the record, I also think Shivers' reasoning is a little muddled
here.  The problems he goes on to describe with loops written tail-
recursively are not rooted in the properties of gotos that Dijkstra
was complaining about, which had much more to do with the ease with
which they can generate unstructured control flow.  The problem is
simply that, in at least some cases, such loops obscure the structure
of the computation rather than expressing it clearly.)

-- Scott
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187308568.838057.301390@r29g2000hsg.googlegroups.com>
> (For the record, I also think Shivers' reasoning is a little muddled
> here.  The problems he goes on to describe with loops written tail-
> recursively are not rooted in the properties of gotos that Dijkstra
> was complaining about, which had much more to do with the ease with
> which they can generate unstructured control flow.  

The semantics of a tail-recursive call are identical to the semantics
of a goto from a control-flow perspective. You can build exactly the
same sorts of unstructured CFGs with each in exactly the same way.
Similarly, if you express the same CFG with both, you'll get the same
unstructured result.

Ultimately, though, focusing on that aspect misses the point. The
control-flow of any given loop is what it is. The focus of the paper,
then, is to ease the construction of complex control flow using a
macro package.
From: Scott Burson
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187321470.719710.192260@q4g2000prc.googlegroups.com>
On Aug 16, 4:56 pm, Rayiner Hashem <·······@gmail.com> wrote:
> The semantics of a tail-recursive call are identical to the semantics
> of a goto from a control-flow perspective.
>
> Ultimately, though, focusing on that aspect misses the point.

Yes, exactly :)

-- Scott
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <fa48oe$5v7$1@online.de>
Scott Burson schrieb:
> I'm not going to dig back through this monstrous thread to find the
> reference.  (Well, maybe I will later, if curiosity overtakes me, but
> not now.) 

Same with me ;-)

> I'm happy to believe this is a simple misunderstanding
> somewhere along the way, but I want to get it cleared up so you don't
> keep thinking that Steele said such a ridiculous thing.

You're late - I've been assuming a misquote (or possibly me 
misunderstanding the quote) since.

Regards,
Jo
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <3uhcn0eb77.fsf@hod.lan.m-e-leypold.de>
Joachim Durchholz wrote:

> I'm not sure where the problems come from. The basic While loop is
> really easy to emulate with HOFs, and there isn't much interest in
> more intricate loop constructs in the FPL community. (I do have some
> ideas why that might be, but phrasing this well enough is beyond my

May I make a suggestion? In functional programming the traditional
while- and other loops are usually replaced by fold() and similar
operations. Since "iterating" over a data structure -- even in the
more general sense of e.g. walking a tree -- can be captured so easily
as a HOF pattern, the need to define one's own control structures is
greatly diminished. IMHO this makes moot the discussion of whether it
is an advantage in FP to define control structures with macros: In FP
it isn't and they aren't required, because the natural patterns to
iterate over data can be easily captured in HOFs (see fold and its
ilk).

Regards -- Markus
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural       language Minim
Date: 
Message-ID: <5ibk00F3ot9kuU1@mid.individual.net>
Joachim Durchholz wrote:
> Pascal Costanza schrieb:
>> Joachim Durchholz wrote:
>>> WHEN, UNLESS, DOLIST and DOTIMES could all be easily written as 
>>> higher-order functions. They'd take functions as parameters for the 
>>> various conditions and bodies, and return a function that, when run, 
>>> executes the constructed conditional resp. loop.
>>
>> This is wrong. WHEN, UNLESS, DOLIST and DOTIMES cannot (!) be 
>> implemented as higher-order functions. They don't take functions as 
>> parameters, they take code fragments as parameters.
> 
> Any code fragment that is an expression also is a function. The open 
> variables become the parameters, the expression's result the function's 
> result, and (if the language is impure) the expression's side effects 
> become the function's side effects.
> 
> Technically, there may be differences - but they don't buy any 
> expressivity.

Yes, they do. You can't tear apart a function and reconstruct it to 
potentially something different or additional because functions are 
typically opaque - the only thing you can do with functions is calling them.

>> What is good about syntactic abstractions, as provided by macros, is 
>> that you can change the implementation underneath without changing the 
>> call sites.
> 
> That's standard issue for any function, too.

Here are two different implementations for 'while:

(defun while-fun (predicate thunk)
   (when (funcall predicate)
     (funcall thunk)
     (while-fun predicate thunk)))

(defmacro while1 (test &body body)
   `(while-fun (lambda () ,test)
      (lambda () ,@body)))

(defmacro while2 (test &body body)
   `(tagbody
      :test (unless ,test (go :end))
            ,@body
            (go :test)
      :end nil))

So consider the following test case:

(while (< i n)
   (setq i (+ i 1)))

With 'while1, the macro expansion looks like this:

(while-fun (lambda () (< i n))
   (lambda () (setq i (+ i 1))))


With 'while2, it looks like that:

(tagbody
   :test (unless (< i n) (go :end))
         (setq i (+ i 1))
         (go :test)
   :end  nil)

So the while1 uses a functional abstraction internally, and while2 uses 
a direct imperative implementation. However, in both cases, the call 
site looks exactly the same.

How do you achieve the same thing with only functional abstractions?

>> For example, you could expand into goto statements instead
>> of higher-order functions to control evaluation of code fragments by 
>> jumping around them, to avoid the overhead of creating closures 
>> completely.
> 
> Closures are a run-time overhead in Lisp? Then I understand a bit better 
> why macros are important.

They are in all languages, because you cannot inline them in the general 
case (when they are used as first-class values).

>> Such a change of implementation strategy is not possible
>> with functional abstractions because you are essentially stuck with 
>> higher-order functions and closures, and don't have anything else at 
>> your disposal.
> 
> Yes, but the compiler can optimize the closures out where that's possible.

The compiler can only optimize what the compiler has anticipated. Macros 
allow you to embed new general-purpose or domain-specific language 
constructs, and by allowing non-intrusive changes in implementation 
strategies, they allow custom optimization strategies. That's no 
surprise, because one way to look at macros is to realize that they give 
you hooks into the compiler. Most languages don't give you such hooks, 
but consider the compiler as a closed tool that you cannot customize in 
any way.

>>> I'm not sure how much of that argument transfers to more complicated 
>>> macros.
>>
>> Delaying evaluation of parts of a macro invocation is only one of the 
>> many possible uses for macros. With macros, you have complete access 
>> to the whole macro invocation and can fully deconstruct all the code 
>> fragments in whatever way you like.
> 
> How do you make sure that the macro doesn't change the semantics of the 
> code submitted to it?

In the same ways that you ensure that other abstraction mechanisms work 
correctly: By proving correctness of the macro expanders, by debugging, 
by testing, etc.

>> Closures, functions, and function
>> arguments in lazy languages, are in contrast typically opaque: You 
>> cannot modify them, and you typically cannot even inspect them.
> 
> Right.
> 
>> So with macros, you have a clear increase in expressive power.
> 
> And a definitive decrease of control.
> A function may ignore a parameter, but it cannot make the parameter do 
> something subtly different than what the caller expected. A macro could.

You seem to be very afraid of a construct that others regularly use 
without significant problems.

>> This is what makes macros especially suitable for embedding 
>> domain-specific languages: You are not restricted by the preferred 
>> programming style of the host language at all. In a pure functional 
>> language, your domain-specific language will essentially always remain 
>> a functional language. In a pure object-oriented language, your 
>> domain-specific language will essentially always remain an 
>> object-oriented language. And so on. As soon as you have macros, you 
>> can always break out of whatever the underlying language favors.
> 
> The downside is that you pay a lot of syntactic overhead simply to 
> select what kind of style you want.
> 
> I'm not sure what to make of the fact that the same multi-paradigm 
> argument is being made for C++.
> I'm also not sure what to make of the fact that Mozart/Oz is even more 
> multi-paradigm than C++ and Lisp combined, but without macros.

If you believe in single-paradigm languages and language purity, then 
there is probably no way that I can convince you. Fortunately, this is 
also not my goal.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c1fh1ace1rla5@corp.supernews.com>
Pascal Costanza wrote:
> Joachim Durchholz wrote:
>> Technically, there may be differences - but they don't buy any
>> expressivity.
> 
> Yes, they do.

Turing argument.

> You can't tear apart a function and reconstruct it to 
> potentially something different or additional because functions are
> typically opaque - the only thing you can do with functions is calling
> them.

That is just parameterizing a function over another function, which is
exactly what HOFs do. This, like most uses of Lisp macros, is done
trivially without macros in any modern FPL.

>>> What is good about syntactic abstractions, as provided by macros, is
>>> that you can change the implementation underneath without changing the
>>> call sites.
>> 
>> That's standard issue for any function, too.
> 
> Here are two different implementations for 'while:
> 
> (defun while-fun (predicate thunk)
>    (when (funcall predicate)
>      (funcall thunk)
>      (while-fun predicate thunk)))
> 
> (defmacro while1 (test &body body)
>    `(while-fun (lambda () ,test)
>       (lambda () ,@body)))
> 
> (defmacro while2 (test &body body)
>    `(tagbody
>       :test (unless ,test (go :end))
>             ,@body
>             (go :test)
>       :end nil))
> 
> So consider the following test case:
> 
> (while (< i n)
>    (setq i (+ i 1)))
> 
> With 'while1, the macro expansion looks like this:
> 
> (while-fun (lambda () (< i n))
>    (lambda () (setq i (+ i 1))))
> 
> 
> With 'while2, it looks like that:
> 
> (tagbody
>    :test (unless (< i n) (go :end))
>          (setq i (+ i 1))
>          (go :test)
>    :end  nil)
> 
> So the while1 uses a functional abstraction internally, and while2 uses
> a direct imperative implementation. However, in both cases, the call
> site looks exactly the same.
> 
> How do you achieve the same thing with only functional abstractions?

As Jo said, you use HOFs:

# let rec while1 p f x = if p x then (f x; while1 p f x);;
val while1 : ('a -> bool) -> ('a -> 'b) -> 'a -> unit = <fun>

# let while2 p f x = while p x do f x done;;
val while2 : ('a -> bool) -> ('a -> 'b) -> 'a -> unit = <fun>

If you couldn't do that, you shouldn't be using macros.

>> Closures are a run-time overhead in Lisp? Then I understand a bit better
>> why macros are important.
> 
> They are in all languages, because you cannot inline them in the general
> case (when they are used as first-class values).

If that unsubstantiated over-generalization were true, it would
mean "closures might have a run-time overhead in any language".

>>> Such a change of implementation strategy is not possible
>>> with functional abstractions because you are essentially stuck with
>>> higher-order functions and closures, and don't have anything else at
>>> your disposal.
>> 
>> Yes, but the compiler can optimize the closures out where that's
>> possible.
> 
> The compiler can only optimize what the compiler has anticipated. Macros
> allow you to embed new general-purpose or domain-specific language
> constructs, and by allowing non-intrusive changes in implementation
> strategies, they allow custom optimization strategies. That's no
> surprise, because one way to look at macros is to realize that they give
> you hooks into the compiler. Most languages don't give you such hooks,
> but consider the compiler as a closed tool that you cannot customize in
> any way.

Macros are just rudimentary term rewriters. Lisp macros are just rudimentary
term rewriters without pattern matching on terms and where terms may only
be expressed using s-exprs.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187040389.365700.92020@19g2000hsx.googlegroups.com>
> Macros are just rudimentary term rewriters. Lisp macros are just rudimentary
> term rewriters without pattern matching on terms and where terms may only
> be expressed using s-exprs.

A macro doesn't have to do anything obviously resembling term
rewriting (though obviously everything it did could be reduced to term
rewriting in an appropriately powerful rewrite system). Eg:

(defmacro foo ()
  (format t "Hello!~%"))

As regular Lisp functions, they are no more or less "rudimentary" or
limited than any other Lisp function, and are definitely less
"rudimentary" than basic term rewriters which are not Turing-complete.
They can use any pattern-matching library you choose to implement if
that's what you want, or whatever other code your domain constraints
might require.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c1r5fnp1vcqe0@corp.supernews.com>
Rayiner Hashem wrote:
>> Macros are just rudimentary term rewriters. Lisp macros are just
>> rudimentary term rewriters without pattern matching on terms and where
>> terms may only be expressed using s-exprs.
> 
> A macro doesn't have to do anything obviously resembling term
> rewriting

Macros only rewrite terms.

> (though obviously everything it did could be reduced to term 
> rewriting in an appropriately powerful rewrite system).

There is no reduction required: macros literally just rewrite terms.

> Eg: 
> 
> (defmacro foo ()
>   (format t "Hello!~%"))

That takes this term:

  ()

and replaces it (rewrites) with this term:

  (format t "Hello!~%")

with an implicit error if the term given was not ().

That rewrite rule can be written in Mathematica:

  Foo[{}] := Print["Hello"]
  Foo[_] := Error

or OCaml:

  let foo = function
  | <:expr< () >> -> <:expr< print_endline "Hello!" >>
  | _ -> invalid_arg "foo"

> As regular Lisp functions, they are no more or less "rudimentary" or
> limited than any other Lisp function,

Exactly.

> and are definitely less 
> "rudimentary" than basic term rewriters which are not Turing-complete.

Rewriting is Turing Complete.

> They can use any pattern-matching library you choose to implement...

Greenspun. I could start with assembler and "choose to implement" a pattern
matcher. Or I could start with a rock and some string and choose to
implement a car to drive to work in.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187057083.188507.102460@r34g2000hsd.googlegroups.com>
> > Eg:
>
> > (defmacro foo ()
> >   (format t "Hello!~%"))
>
> That takes this term:
>
>   ()
>
> and replaces it (rewrites) with this term:
>
>   (format t "Hello!~%")

Uh, no. It takes () and returns (). Its only interesting action is to
produce a side-effect: printing "Hello!" to standard output, at macro-
expansion time. To drive home the point:

(defmacro bar ()
  (connect-to-pizza-hut)
  (order-pizza :large :anchovies)
  (charge-to-visa-card :debit t)
  nil)

In terms of rewrites, this macro is identical to the previous one:
() -> (). Obviously, it has a completely different effect on the
compilation environment.
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187195417.934218.216300@m37g2000prh.googlegroups.com>
On Aug 13, 4:35 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> Macros only rewrite terms.

The only way to make that sentence true requires defining "rewrite" as
general computation.

Unless Harrop has something more powerful, the "only" is unjustified.

> Rewriting is Turing Complete.

And he doesn't.

The question then becomes "is the term rewriting provided by lisp
macros useful".

Since Harrop has pointed out that OCaml includes a mechanism with what
he claims to be similar power and utility, he can't argue that lisp
macros aren't useful.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <o0lkccn3fh.fsf@hod.lan.m-e-leypold.de>
Andy Freeman wrote:

> On Aug 13, 4:35 pm, Jon Harrop <····@ffconsultancy.com> wrote:
>> Macros only rewrite terms.
>
> The only way to make that sentence true requires defining "rewrite" as
> general computation.
>
> Unless Harrop has something more powerful, the "only" is unjustified.
>
>> Rewriting is Turing Complete.
>
> And he doesn't.
>
> The question then becomes "is the term rewriting provided by lisp
> macros useful".
>
> Since Harrop has pointed out that OCaml includes a mechanism with what
> he claims to be similar power and utility, he can't argue that lisp
> macros aren't useful.


Just for the sake of logic in usenet discussions: Perhaps Jon has
pointed out that OCaml includes a mechanism that isn't useful. 

Regards -- Markus
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5ic3cqF3oj09uU1@mid.individual.net>
Jon Harrop wrote:

>> So the while1 uses a functional abstraction internally, and while2 uses
>> a direct imperative implementation. However, in both cases, the call
>> site looks exactly the same.
>>
>> How do you achieve the same thing with only functional abstractions?
> 
> As Jo said, you use HOFs:
> 
> # let rec while1 p f x = if p x then (f x; while1 p f x);;
> val while1 : ('a -> bool) -> ('a -> 'b) -> 'a -> unit = <fun>
> 
> # let while2 p f x = while p x do f x done;;
> val while2 : ('a -> bool) -> ('a -> 'b) -> 'a -> unit = <fun>

Your while2 is not the same as my while2. There is no macro expansion 
going on, and it doesn't use the same implementation strategy as in my 
version. The latter is especially important: You have no control over 
the details of the implementation strategy.

Since with macros, we have such control, I was for example able to 
provide a very efficient implementation of the minim language. We are 
still waiting for your version.

> Macros are just rudimentary term rewriters. Lisp macros are just rudimentary
> term rewriters without pattern matching on terms and where terms may only
> be expressed using s-exprs.

Since s-expressions consist of lists, symbols and _any other data 
structure_, this statement doesn't mean much.

Yes, you can view macros as term rewriters, but you are still missing 
the important part that they are seamlessly integrated with Lisp compilers.

Saying that macros are "just" term rewriters strongly indicates that you 
don't know what you're talking about.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c1tqhqsuehce7@corp.supernews.com>
Pascal Costanza wrote:
> Jon Harrop wrote:
>> As Jo said, you use HOFs:
>> 
>> # let rec while1 p f x = if p x then (f x; while1 p f x);;
>> val while1 : ('a -> bool) -> ('a -> 'b) -> 'a -> unit = <fun>
>> 
>> # let while2 p f x = while p x do f x done;;
>> val while2 : ('a -> bool) -> ('a -> 'b) -> 'a -> unit = <fun>
> 
> Your while2 is not the same as my while2. There is no macro expansion
> going on,

You claimed that "while" could not be implemented in terms of higher-order
functions. So I implemented it in terms of higher-order functions.

Then you claimed that macros allow you to hide the internal representation
of "while" but higher-order functions cannot. So I posted the above pair
of "while" implementations, written as higher-order functions with the same
interface but different internal representations (functional and
imperative, respectively).

> and it doesn't use the same implementation strategy as in my 
> version.

I can see no difference.

> The latter is especially important: You have no control over 
> the details of the implementation strategy.

The above are objective counter-examples that represent the extent that HOFs
overlap with macros in terms of functionality.

A better example of functionality that is better provided by a macro might
be the definition of a sum type and associated functions, such as pretty
marshalling. In SML and Haskell you must write:

type t = A|B|C

let string_of_t = function
  | A -> "Foo"
  | B -> "Bar"
  | C -> "Baz"

let t_of_string = function
  | "Foo" -> A
  | "Bar" -> B
  | "Baz" -> C
  | _ -> invalid_arg "t_of_string"

This can be automated for a special case using a macro in OCaml or Lisp.

> Since with macros, we have such control, I was for example able to
> provide a very efficient implementation of the minim language. We are
> still waiting for your version.

I'm on it...

>> Macros are just rudimentary term rewriters. Lisp macros are just
>> rudimentary term rewriters without pattern matching on terms and where
>> terms may only be expressed using s-exprs.
> 
> Since s-expressions consists of lists, symbols and _any other data
> structure_, this statement doesn't mean much.

If Lisp implemented only bit sequences you would be saying that any data
structure can be transformed into a bit sequence using some subjectively
chosen encoding and, therefore, having other data structures is pointless.

Out of curiosity, how would you embed these mutually cyclic lists in a
s-expr:

# let rec a = 0 :: b and b = 1 :: a;;
val a : int list =
  [0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0...
val b : int list =
  [1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1...

> Yes, you can view macros as term rewriters, but you are still missing
> the important part that they are seamlessly integrated with Lisp
> compilers.

I agree completely. I was focusing on the points of difference rather than
the similarities.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: ········@ps.uni-sb.de
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187084336.890138.237470@o61g2000hsh.googlegroups.com>
On 14 Aug., 02:20, Jon Harrop <····@ffconsultancy.com> wrote:
>
> A better example of functionality that is better provided by a macro might
> be the definition of a sum type and associated functions, such as pretty
> marshalling. In SML and Haskell you must write:
>
> type t = A|B|C
>
> let string_of_t = function
>   | A -> "Foo"
>   | B -> "Bar"
>   | C -> "Baz"
>
> let t_of_string = function
>   | "Foo" -> A
>   | "Bar" -> B
>   | "Baz" -> C
>   | _ -> invalid_arg "t_of_string"

Um, in Haskell you simply write

  data T = A | B | C deriving (Show)

- Andreas
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c49fmtn12b7bd@corp.supernews.com>
········@ps.uni-sb.de wrote:
> On 14 Aug., 02:20, Jon Harrop <····@ffconsultancy.com> wrote:
>> A better example of functionality that is better provided by a macro
>> might be the definition of a sum type and associated functions, such as
>> pretty marshalling. In SML and Haskell you must write:
>>
>> type t = A|B|C
>>
>> let string_of_t = function
>>   | A -> "Foo"
>>   | B -> "Bar"
>>   | C -> "Baz"
>>
>> let t_of_string = function
>>   | "Foo" -> A
>>   | "Bar" -> B
>>   | "Baz" -> C
>>   | _ -> invalid_arg "t_of_string"
> 
> Um, in Haskell you simply write
> 
>   data T = A | B | C deriving (Show)

Note that "Foo", "Bar" and "Baz" do not happen to be the names of the
identifiers in the source code.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5icvk9F3ns21nU1@mid.individual.net>
Jon Harrop wrote:
> Pascal Costanza wrote:
>> Jon Harrop wrote:
>>> As Jo said, you use HOFs:
>>>
>>> # let rec while1 p f x = if p x then (f x; while1 p f x);;
>>> val while1 : ('a -> bool) -> ('a -> 'b) -> 'a -> unit = <fun>
>>>
>>> # let while2 p f x = while p x do f x done;;
>>> val while2 : ('a -> bool) -> ('a -> 'b) -> 'a -> unit = <fun>
>> Your while2 is not the same as my while2. There is no macro expansion
>> going on,
> 
> You claimed that "while" could not be implemented in terms of higher-order
> functions.

...in a strict language with an interface that doesn't require the 
client to pass functions as parameters.

> Then you claimed that macros allow you to hide the internal representation
> of "while" but higher-order functions cannot. So I posted the above pair
> of "while" implementations, written as higher-order functions with the same
> interface but different internal representations (functional and
> imperative, respectively).
> 
>> and it doesn't use the same implementation strategy as in my 
>> version.
> 
> I can see no difference.

Look closer.

>>> Macros are just rudimentary term rewriters. Lisp macros are just
>>> rudimentary term rewriters without pattern matching on terms and where
>>> terms may only be expressed using s-exprs.
>> Since s-expressions consists of lists, symbols and _any other data
>> structure_, this statement doesn't mean much.
> 
> If Lisp implemented only bit sequences you would be saying that any data
> structure can be transformed into a bit sequence using some subjectively
> chosen encoding and, therefore, having other data structures is pointless.

Nonsense.

> Out of curiosity, how would you embed these mutually cyclic lists in a
> s-expr:
> 
> # let rec a = 0 :: b and b = 1 :: a;;
> val a : int list =
>   [0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0...
> val b : int list =
>   [1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1...

(let ((a (list 0))
       (b (list 1)))
   (setf (cdr a) b
         (cdr b) a)
   (values a b))


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-37AD42.10453514082007@news-europe.giganews.com>
In article <···············@mid.individual.net>,
 Pascal Costanza <··@p-cos.net> wrote:

> Jon Harrop wrote:
> > Pascal Costanza wrote:
> >> Jon Harrop wrote:
> >>> As Jo said, you use HOFs:
> >>>
> >>> # let rec while1 p f x = if p x then (f x; while1 p f x);;
> >>> val while1 : ('a -> bool) -> ('a -> 'b) -> 'a -> unit = <fun>
> >>>
> >>> # let while2 p f x = while p x do f x done;;
> >>> val while2 : ('a -> bool) -> ('a -> 'b) -> 'a -> unit = <fun>
> >> Your while2 is not the same as my while2. There is no macro expansion
> >> going on,
> > 
> > You claimed that "while" could not be implemented in terms of higher-order
> > functions.
> 
> ...in a strict language with an interface that doesn't require the 
> client to pass functions as parameters.
> 
> > Then you claimed that macros allow you to hide the internal representation
> > of "while" but higher-order functions cannot. So I posted the above pair
> > of "while" implementations, written as higher-order functions with the same
> > interface but different internal representations (functional and
> > imperative, respectively).
> > 
> >> and it doesn't use the same implementation strategy as in my 
> >> version.
> > 
> > I can see no difference.
> 
> Look closer.
> 
> >>> Macros are just rudimentary term rewriters. Lisp macros are just
> >>> rudimentary term rewriters without pattern matching on terms and where
> >>> terms may only be expressed using s-exprs.
> >> Since s-expressions consists of lists, symbols and _any other data
> >> structure_, this statement doesn't mean much.
> > 
> > If Lisp implemented only bit sequences you would be saying that any data
> > structure can be transformed into a bit sequence using some subjectively
> > chosen encoding and, therefore, having other data structures is pointless.
> 
> Nonsense.
> 
> > Out of curiosity, how would you embed these mutually cyclic lists in a
> > s-expr:
> > 
> > # let rec a = 0 :: b and b = 1 :: a;;
> > val a : int list =
> >   [0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0...
> > val b : int list =
> >   [1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1; 0; 1...
> 
> (let ((a (list 0))
>        (b (list 1)))
>    (setf (cdr a) b
>          (cdr b) a)
>    (values a b))
> 
> 
> Pascal


? (setf *print-circle* t)

T

? (let ((a (list 0))
       (b (list 1)))
   (setf (cdr a) b
         (cdr b) a)
   (values a b))

#1=(0 1 . #1#)
#1=(1 0 . #1#)
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187194038.346588.251670@i13g2000prf.googlegroups.com>
On Aug 13, 5:20 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> You claimed that "while" could not be implemented in terms of higher-order
> functions. So I implemented it in terms of higher-order functions.

Note that Harrop doesn't show us uses, just the definition.

His HOF definition of while is called with functions.  Defining those
functions is syntax beyond "sequence of statements".  Those functions
are almost all single-use.

There's nothing wrong with single-use functions, but having to define
them so one can use a HOF while is overhead.

Yes, this matters.  If it didn't, languages would have far fewer
intentional forms.  For example, IF can be a pre-defined HOF.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c6dklbg8qia7f@corp.supernews.com>
Andy Freeman wrote:
> On Aug 13, 5:20 pm, Jon Harrop <····@ffconsultancy.com> wrote:
>> You claimed that "while" could not be implemented in terms of
>> higher-order functions. So I implemented it in terms of higher-order
>> functions.
> 
> Note that Harrop doesn't show us uses, just the definition.

# while1 (fun x -> !x > 0) decr (ref 5);;
- : unit = ()

> His HOF definition of while is called with functions.  Defining those
> functions is syntax beyond "sequence of statements".  Those functions
> are almost all single-use.
> 
> There's nothing wrong with single-use functions, but having to define
> them so one can use a HOF while is overhead.
> 
> Yes, this matters.  If it didn't, languages would have far fewer
> intentional forms.

Pattern matching being by far the most important.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187275752.055520.203480@q3g2000prf.googlegroups.com>
On Aug 15, 10:12 am, Jon Harrop <····@ffconsultancy.com> wrote:
> Andy Freeman wrote:
> > Note that Harrop doesn't show us uses, just the definition.
>
> # while1 (fun x -> !x > 0) decr (ref 5);;
> - : unit = ()

Let's compare uses of WHILE-HOF (HOF implementation of while) with
ordinary WHILE.

(WHILE (< x 0) (incf x))
(WHILE-HOF (< x 0) (lambda () (incf x)))

The HOF version must be called with a function.

Function definition is syntax.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cjli646matr9a@corp.supernews.com>
Andy Freeman wrote:
> On Aug 15, 10:12 am, Jon Harrop <····@ffconsultancy.com> wrote:
>> Andy Freeman wrote:
>> > Note that Harrop doesn't show us uses, just the definition.
>>
>> # while1 (fun x -> !x > 0) decr (ref 5);;
>> - : unit = ()
> 
> Let's compare uses of WHILE-HOF (HOF implementation of while) with
> ordinary WHILE.
> 
> (WHILE (< x 0) (incf x))
> (WHILE-HOF (< x 0) (lambda () (incf x)))
> 
> The HOF version must be called with a function.
> 
> Function definition is syntax.

In a modern FPL:

  while ((>=) 0) incr x

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13canrmr8v5l8c0@corp.supernews.com>
Andy Freeman wrote:
> On Aug 15, 10:12 am, Jon Harrop <····@ffconsultancy.com> wrote:
>> Andy Freeman wrote:
>> > Note that Harrop doesn't show us uses, just the definition.
>>
>> # while1 (fun x -> !x > 0) decr (ref 5);;
>> - : unit = ()
> 
> Let's compare uses of WHILE-HOF (HOF implementation of while) with
> ordinary WHILE.
> 
> (WHILE (< x 0) (incf x))
> (WHILE-HOF (< x 0) (lambda () (incf x)))
> 
> The HOF version must be called with a function.
> 
> Function definition is syntax.

In a modern FPL:

  while ((>=) 0) incr x

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c91745q73jq28@corp.supernews.com>
Andy Freeman wrote:
> On Aug 15, 10:12 am, Jon Harrop <····@ffconsultancy.com> wrote:
>> Andy Freeman wrote:
>> > Note that Harrop doesn't show us uses, just the definition.
>>
>> # while1 (fun x -> !x > 0) decr (ref 5);;
>> - : unit = ()
> 
> Let's compare uses of WHILE-HOF (HOF implementation of while) with
> ordinary WHILE.
> 
> (WHILE (< x 0) (incf x))
> (WHILE-HOF (< x 0) (lambda () (incf x)))
> 
> The HOF version must be called with a function.
> 
> Function definition is syntax.

In a modern FPL:

  while ((>=) 0) incr x

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Stephen J. Bevan
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <871we2kf1z.fsf@dnsalias.com>
Andy Freeman <······@earthlink.net> writes:
> On Aug 15, 10:12 am, Jon Harrop <····@ffconsultancy.com> wrote:
>> Andy Freeman wrote:
>> > Note that Harrop doesn't show us uses, just the definition.
>>
>> # while1 (fun x -> !x > 0) decr (ref 5);;
>> - : unit = ()
>
> Let's compare uses of WHILE-HOF (HOF implementation of while) with
> ordinary WHILE.
>
> (WHILE (< x 0) (incf x))
> (WHILE-HOF (< x 0) (lambda () (incf x)))
>
> The HOF version must be called with a function.
>
> Function definition is syntax.

Parentheses are syntax too.  Let's compare it with Smalltalk that uses
HOFs :-

  [x < 0] whileTrue: [x incf].
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <yf643goj2u.fsf@hod.lan.m-e-leypold.de>
Andy Freeman wrote:

> On Aug 13, 5:20 pm, Jon Harrop <····@ffconsultancy.com> wrote:
>> You claimed that "while" could not be implemented in terms of higher-order
>> functions. So I implemented it in terms of higher-order functions.
>
> Note that Harrop doesn't show us uses, just the definition.

To whom are you talking by the way? Not to the author of the post
you're replying to, it seems.

> His HOF definition of while is called with functions.  Defining those
> functions is syntax beyond "sequence of statements".  Those functions
> are almost all single-use.
>
> There's nothing wrong with single-use functions, but having to define
> them so one can use a HOF while is overhead.

In Haskell, e.g.  the overhead is really, really small: \x ->
something-with x vs. (something-with x).

> Yes, this matters.  If it didn't, languages would have far fewer
> intentional forms.  For example, IF can be a pre-defined HOF.

But even if I grant your point, what is the conclusion you're drawing
from it? From your style of writing I suspect there is a QED lurking
in the bushes, but it would help if you actually restated (a) what are
you trying to show and (b) show the connection to the point you're
making.

Regards -- Markus
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187277860.733358.280750@i13g2000prf.googlegroups.com>
On Aug 15, 9:31 am, ·····································@ANDTHATm-e-
leypold.de (Markus E.L. 2) wrote:
> Andy Freeman wrote:
> > On Aug 13, 5:20 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> >> You claimed that "while" could not be implemented in terms of higher-order
> >> functions. So I implemented it in terms of higher-order functions.
>
> > Note that Harrop doesn't show us uses, just the definition.
>
> To whom are you talking by the way? Not to the author of the post
> you're replying to, it seems.

I'm addressing "the audience".

It's unlikely that Harrop is going to be convinced by these
discussions (if only because his livelyhood depends on belief in his
positions) and if one was interested in convincing him, e-mail would
be better.

> In Haskell, e.g.  the overhead is really, really small: \x ->
> something-with x vs. (something-with x).

Of course it's small.  It's common, so it would be dumb to make it
big.

> > Yes, this matters.  If it didn't, languages would have far fewer
> > intentional forms.  For example, IF can be a pre-defined HOF.
>
> But even if I grant your point,

Does anyone want to argue that IF can't be a HOF with three arguments,
one a test value and the other two being alternative functions?  In
fact, it need not call the chosen function - it can just return it and
let the caller do the deed.  In other words, the typical use would
look like: ((ef (< x 0) (lambda () x) (lambda () y))).

OCaml/Haskell reduce the overhead of defining those two functions, but
not so far that if is an HOF.  In short, syntax matters.

> what ist the conclusion you're drawing from it?

I've stated the conclusion repeatedly.  Syntax matters.  The fact that
one can do without something doesn't imply that it isn't useful.

If syntax matters, the issue is how to make it useful for the problems
at hand, and yes, there are multiple problems.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c916upirlfv27@corp.supernews.com>
Andy Freeman wrote:
> OCaml/Haskell reduce the overhead of defining those two functions, but
> not so far that if is an HOF.  In short, syntax matters.

Jo made the subtle but important point that there is no overhead in the case
of a lazy language because computations are naturally deferred so you do
not need to wrap them in a closure.

In other words, you're bragging about solving a problem that you invented.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <c6fy2j49rf.fsf@hod.lan.m-e-leypold.de>
Andy Freeman wrote:

> On Aug 15, 9:31 am, ·····································@ANDTHATm-e-
> leypold.de (Markus E.L. 2) wrote:
>> Andy Freeman wrote:
>> > On Aug 13, 5:20 pm, Jon Harrop <····@ffconsultancy.com> wrote:
>> >> You claimed that "while" could not be implemented in terms of higher-order
>> >> functions. So I implemented it in terms of higher-order functions.
>>
>> > Note that Harrop doesn't show us uses, just the definition.
>>
>> To whom are you talking by the way? Not to the author of the post
>> you're replying to, it seems.
>
> I'm addressing "the audience".

I always like it if people seem to talk to me and just stare into the
void left of my left ear.

> It's unlikely that Harrop is going to be convinced by these
> discussions (if only because his livelyhood depends on belief in his
> positions) and if one was interested in convincing him, e-mail would
> be better.

But is the audience interested?


>> In Haskell, e.g.  the overhead is really, really small: \x ->
>> something-with x vs. (something-with x).
>
> Of course it's small.  It's common, so it would be dumb to make it
> big.
>
>> > Yes, this matters.  If it didn't, languages would have far fewer
>> > intentional forms.  For example, IF can be a pre-defined HOF.
>>
>> But even if I grant your point,
>
> Does anyone want to argue that IF can't be a HOF with three arguments,
> one a test value and the other two being alternative functions?  In
> fact, it need not call the chosen function - it can just return it and
> let the caller do the deed.  In other words, the typical use would
> look like: ((ef (< x 0) (lambda () x) (lambda () y))).


Or like

    if (< x 0)
       something (foo (
       somethingelse (bar (baz boo))

In Haskell (AFAIK there is no lambda with 0 arity in Haskell, since
evaluation is lazy).


> OCaml/Haskell reduce the overhead of defining those two functions, but
> not so far that if is an HOF.  In short, syntax matters.

See above. To illustrate the point in more detail a complete sample
implementation of an alternative "conditional construct":

   select 1 alt1 alt2 alt3 = alt1
   select 2 alt1 alt2 alt3 = alt2
   select 3 alt1 alt2 alt3 = alt3

and the usage

   Main> select 3 "a" "b" "c"
   "c"
   *Main> select 2 (bottom 15) (17/2) (20/10)
   8.5
   *Main> select 1 (bottom 15) (17/2) (20/10)
   ERROR - Garbage collection fails to reclaim sufficient space

The last invocation illustrates that actually there is no lambda
required to protect the arguments. The example (IMHO) also shows why
people have been claiming that in a pure FPL HOFs can replace macros.


>> what ist the conclusion you're drawing from it?
>
> I've stated the conclusion repeatedly.  Syntax matters.  The fact that
> one can do without something doesn't imply that it isn't useful.
>
> If syntax matters, the issue is how to make it useful for the problems
> at hand, and yes, there are multiple problems.

This statement seems to be incomplete: Yes, syntax matters (and I'm
surprised to hear that from a Lisp person since anytime anyone is so
misguided as to criticise the parentheses in Lisp he/she gets the
reply that it is only syntax and that one doesn't need the overly rich
and non-uniform syntax of, e.g., C, C++, Pascal etc). But I can't find
the word "macro" in your statement. Do you expect me to fill out the
gaps myself? It is my suspicion that this thread has become pretty
boring and useless, because you (plural) have forgotten what you're
arguing: The topic has moved, but everyone expects that it is somehow
well known what the arguments are trying to prove. I also suspect that
you would be forced to notice that if you made the effort to summarize
your hypothesis and your argument in a self-contained way, essay style.

Regards -- Markus
From: Duane Rettig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <o0tzqygjse.fsf@gemini.franz.com>
·····································@ANDTHATm-e-leypold.de (Markus
E.L. 2) writes:

> Andy Freeman wrote:
>
>> On Aug 15, 9:31 am, ·····································@ANDTHATm-e-
>> leypold.de (Markus E.L. 2) wrote:
>>> Andy Freeman wrote:
>>> > On Aug 13, 5:20 pm, Jon Harrop <····@ffconsultancy.com> wrote:
>>> >> You claimed that "while" could not be implemented in terms of higher-order
>>> >> functions. So I implemented it in terms of higher-order functions.
>>>
>>> > Note that Harrop doesn't show us uses, just the definition.
>>>
>>> To whom are you talking by the way? Not to the author of the post
>>> you're replying to, it seems.
>>
>> I'm addressing "the audience".
>
> I always like it if people seem to talk to me and just stare into the
> void left of my left ear.

We'll be sure to do that in the future.  Toward that end, would you
kindly post a picture of your left ear, so we can oblige you?

:-)

Also, I'm glad you specified your left ear; I'd have wondered about
your self-esteem if you had implied a void to the left of your right
ear.

:-) :-)

>> It's unlikely that Harrop is going to be convinced by these
>> discussions (if only because his livelyhood depends on belief in his
>> positions) and if one was interested in convincing him, e-mail would
>> be better.
>
> But is the audience interested?

Yes.

>>> what ist the conclusion you're drawing from it?
>>
>> I've stated the conclusion repeatedly.  Syntax matters.  The fact that
>> one can do without something doesn't imply that it isn't useful.
>>
>> If syntax matters, the issue is how to make it useful for the problems
>> at hand, and yes, there are multiple problems.
>
> This statements seems to be incomplete: Yes, syntax matters (and I'm
> surprised to hear that from a Lisp person since anytime anyone is so
> misguided as to critiszise the parenthesis in Lisp he/she gets the
> reply that it is only syntax and that one doesn't need the overly rich
> and not uniform syntax of, eg. C, C++, Pascal etc).

According to Wikipedia, syntax is implied to be the study of the whole
of a grammar, including its structure.  But when non-lispers talk to
lispers about syntax, they get stuck in the "lisp is parentheses"
myth.  What they don't know about lispers is that lispers don't "see"
the parentheses when they program, because they have tools that
maintain indentations according to the parens.  So the conversation
that a lisper and a non-lisper gets stuck in is the "lisp's syntax is
parentheses"/"syntax (i.e. parentheses) isn't important".  Is it
really true that parens aren't important?  Of course not; just try
mismatching a paren or two, and see the fireworks.  But at the
cognitive level, when the lisper is really considering the _syntax_ of
the program, he is not looking at the parentheses, because the parens
disappear to a lisper.

 [ ... ]

>          It is my suspicion that this thread has become pretty
> boring and useless, because you (plural) have forgotten what you're
> arguing: The topic has moved, but everyone expects that it is somehow
> well known what the arguments are trying to prove. I also suspect that
> you would be forced to notice that if you made the effort to summarize
> your hypthesis and your argument in a self contained way, essay style.

Yes, this thread should die, but it is kept alive by a few people who
refuse to stop talking about it.  I place myself in the same category
of the few; though I am not the biggest offender (when it comes to
keeping this thread alive), I am after all writing this reply, so in
that sense I am keeping alive a thread that should die.

I'm not asking you or anybody else to stop writing.  But I do see
arguments being made over and over again, by the same people, to the
same people, some having the expectation that the result will change.
I, for one, refuse to make an argument twice, because if my audience
didn't listen to me the first time, then expecting them to listen to
me again would be insane.

What I see in this thread are many facets:

 1. We have a lot of people in different "camps", coming from
different points of view.

 2. We have sets of definitions that are assumed but not agreed upon.
This is based on the unswerving belief that the definitions that were
learned in each person's point of view are the only correct
definition, regardless of why others' backgrounds have provided
different definitions for the same terms.

 3. We have people from all sides asking others for examples, over and
over.  People on the other side are not conducive to complying,
because the examples that are given are ripped apart by the asker,
thus denigrating the supplier's point of view.  How can one be
comfortable providing examples in such an environment?

 4. Finally, we have a salesman, who doesn't _want_ this thread to
die; his agenda is to sell his product, and he uses repetition and
misinformation to continue posting his drivel to people who don't want
to hear it.

Now, there are some positive aspects to this thread:

 a. It raises the numbers for both c.l.l and c.l.f - we'll have to see
what the thread does for us both on tiobe.com in the next few months...

 b. It raises awareness of different points of view, to the extent
that people accept that there _are_ different points of view.

In a perfect world, people contributing to this thread would realize
that they have no control over the thread, nor over the opinions of
others listening.  They would realize that their contributions are
precisely that: contributions, and as such, they need to let them go -
if a contribution isn't accepted, it isn't your mission, nor is it
even in your power, to force the other person to accept what you have
to say, so just get it said, and let it go, perhaps with one or at
most two followups for clarification.  I daresay that most of the
articles in this thread would be categorized by their respective
authors as clarifications, but so much clarification here is
just muddying the waters.

Well, enough from me.  I'll try to refrain from saying more about the
metasubject of this thread (except for a followup or two for
clarification).

-- 
Duane Rettig    ·····@franz.com    Franz Inc.  http://www.franz.com/
555 12th St., Suite 1450               http://www.555citycenter.com/
Oakland, Ca. 94607        Phone: (510) 452-2000; Fax: (510) 452-0182   
From: Pascal Bourguignon
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <871we2avgi.fsf@thalassa.informatimago.com>
Duane Rettig <·····@franz.com> writes:

> ·····································@ANDTHATm-e-leypold.de (Markus
> E.L. 2) writes:
>> [...]
>> I always like it if people seem to talk to me and just stare into the
>> void left of my left ear.
>
> We'll be sure to do that in the future.  Toward that end, would you
> kindly post a picture of your left ear, so we can oblige you?
>
> :-)
>
> Also, I'm glad you specified your left ear; I'd have wondered about
> your self-esteem if you had implied a void to the left of your right
> ear.
>
> :-) :-)

But when you stare at his left ear, and at the left of his left ear
(but not too far), isn't it where you had in mind?

-- 
__Pascal Bourguignon__                     http://www.informatimago.com/

NOTE: The most fundamental particles in this product are held
together by a "gluing" force about which little is currently known
and whose adhesive power can therefore not be permanently
guaranteed.
From: Duane Rettig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <o0ir7dhi1j.fsf@gemini.franz.com>
Pascal Bourguignon <···@informatimago.com> writes:

> Duane Rettig <·····@franz.com> writes:
>
>> ·····································@ANDTHATm-e-leypold.de (Markus
>> E.L. 2) writes:
>>> [...]
>>> I always like it if people seem to talk to me and just stare into the
>>> void left of my left ear.
>>
>> We'll be sure to do that in the future.  Toward that end, would you
>> kindly post a picture of your left ear, so we can oblige you?
>>
>> :-)
>>
>> Also, I'm glad you specified your left ear; I'd have wondered about
>> your self-esteem if you had implied a void to the left of your right
>> ear.
>>
>> :-) :-)
>
> But when you stare at his left ear, and at the left of his left ear
> (but not too far), isn't it where you had in mind?

Ah, I see.  Actually, this illustrates the more serious point of my
last message perfectly - with apologies to Markus for using this
example:

The issue is one of point-of-view.  If, as I assume Markus did, one
takes "left of" from the speaker's point of view, then there is no
harm done; the void is out in space somewhere.  But if you look at it
as if you were facing Markus, as I believe you have done, then it
looks like Markus is being self-deprecating.  Of course, Markus could
indeed have been self-deprecating in his message, and my natural
tendency was to give him the benefit of the doubt (and to try to view
things from _his_ point of view), so I may be the one in the minority
in this case.

At any rate, your observation has been a perfect object lesson on the
importance of understanding points-of-view, in the pursuit of
understanding the knowledge other people possess.

-- 
Duane Rettig    ·····@franz.com    Franz Inc.  http://www.franz.com/
555 12th St., Suite 1450               http://www.555citycenter.com/
Oakland, Ca. 94607        Phone: (510) 452-2000; Fax: (510) 452-0182   
From: Stephen J. Bevan
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87643gjifj.fsf@dnsalias.com>
Andy Freeman <······@earthlink.net> writes:
> His HOF definition of while is called with functions.  Defining those
> functions is syntax beyond "sequence of statements".  Those functions
> are almost all single-use.
>
> There's nothing wrong with single-use functions, but having to define
> them so one can use a HOF while is overhead.

In Smalltalk the overhead is putting '[' at the start and ']' at the
end.  Hard to believe a Lisper would consider that too much overhead
given the prolific use of '(' and ')' in Lisp.


> Yes, this matters.  If it didn't, languages would have far fewer
> intentional forms.  For example, IF can be a pre-defined HOF.

Which it is in Smalltalk.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9ro7o$a7n$2@online.de>
Jon Harrop schrieb:
> Pascal Costanza wrote:
>> Joachim Durchholz wrote:
>>> Technically, there may be differences - but they don't buy any
>>> expressivity.
>> Yes, they do.
> 
> Turing argument.

No. This is about expressivity (i.e. the number and complexity of 
constructs to get a specific algorithm implemented), not about 
algorithmic power.

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c4fufntsmhfd4@corp.supernews.com>
Joachim Durchholz wrote:
> Jon Harrop schrieb:
>> Pascal Costanza wrote:
>>> Joachim Durchholz wrote:
>>>> Technically, there may be differences - but they don't buy any
>>>> expressivity.
>>> Yes, they do.
>> 
>> Turing argument.
> 
> No. This is about expressivity

Expressiveness.

> (i.e. the number and complexity of 
> constructs to get a specific algorithm implemented), not about
> algorithmic power.

"Turing argument" means a constant-size boilerplate buys you the
expressiveness of another paradigm via an interpreter for it. In this case,
you don't need macros because you can write a term rewriter.

All that matters is how easily you can solve problems in given language
implementations. Thus far, there have been no examples in this thread
demonstrating the utility of Lisp's macros. I have several examples where
OCaml's macros are important but none of them can be done in Lisp because
it lacks the prerequisites.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Kent M Pitman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <u8x8dmytr.fsf@nhplace.com>
[ comp.lang.lisp only; http://www.nhplace.com/kent/PFAQ/cross-posting.html ]

Jon Harrop <···@ffconsultancy.com> writes:

> Thus far, there have been no examples in this thread demonstrating
> the utility of Lisp's macros.

Well, hmmm.  What can we conclude from that?

 (a) Lisp macros have little or no utility, so they go unused.

 (b) This thread has little or no utility, so it goes unused.

[insert sound of jeopardy music playing in the background.]

> I have several examples where OCaml's macros are important but none
> of them can be done in Lisp because it lacks the prerequisites.

Hmmm.

"several"  Yep, that's a big number.

"none of them can be done in Lisp"  Sounds very definitive.

"lacks the prerequisites"  Sounds very formalism-esque.

You know, the problem here is you're trying to wage a case for the
undeniability of theorem proving as a universally compelling and
central way of thinking about and notating all issues.... Yet all the
while you're making statements like this that sound like they're
offered as proofs of something but aren't really.  Do you not see the
irony in that?
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural         language Minim
Date: 
Message-ID: <f9ro3t$a7n$1@online.de>
Pascal Costanza schrieb:
> Joachim Durchholz wrote:
>> Any code fragment that is an expression also is a function. The open 
>> variables become the parameters, the expression's result the 
>> function's result, and (if the language is impure) the expression's 
>> side effects become the function's side effects.
>>
>> Technically, there may be differences - but they don't buy any 
>> expressivity.
> 
> Yes, they do. You can't tear apart a function and reconstruct it to 
> potentially something different or additional because functions are 
> typically opaque - the only thing you can do with functions is calling 
> them.

I wouldn't want to have a mechanism in a language that can tear apart 
functions. That would mean that I can't simply check the properties of a 
function locally, I'd have to check all the places where the function 
might be transformed, too, so I'd need to do whole-system data-flow 
analysis.

(Lisp: all the power, none of the control. Programmer discipline 
required to avoid disaster - the power helps to make the discipline 
easy, of course, but a single bad day of a single programmer can still 
ruin the project. No thanks, Sir.)

>>> What is good about syntactic abstractions, as provided by macros, is 
>>> that you can change the implementation underneath without changing 
>>> the call sites.
>>
>> That's standard issue for any function, too.
> 
> Here are two different implementations for 'while:
> 
> (defun while-fun (predicate thunk)
>   (when (funcall predicate)
>     (funcall thunk)
>     (while-fun predicate thunk)))
> 
> (defmacro while1 (test &body body)
>   `(while-fun (lambda () ,test)
>      (lambda () ,@body)))
> 
> (defmacro while2 (test &body body)
>   `(tagbody
>      :test (unless ,test (go :end))
>            ,@body
>            (go :test)
>      :end nil))
> 
> So consider the following test case:
> 
> (while (< i n)
>   (setq i (+ i 1)))
> 
> With 'while1, the macro expansion looks like this:
> 
> (while-fun (lambda () (< i n))
>   (lambda () (setq i (+ i 1))))
> 
> 
> With 'while2, it looks like that:
> 
> (tagbody
>   :test (unless (< i n) (go :end))
>         (setq i (+ i 1))
>         (go :test)
>   :end  nil)
> 
> So the while1 uses a functional abstraction internally, and while2 uses 
> a direct imperative implementation. However, in both cases, the call 
> site looks exactly the same.
> 
> How do you achieve the same thing with only functional abstractions?

I'm not sure what point you're trying to make here.

>>> For example, you could expand into goto statements instead
>>> of higher-order functions to control evaluation of code fragments by 
>>> jumping around them, to avoid the overhead of creating closures 
>>> completely.
>>
>> Closures are a run-time overhead in Lisp? Then I understand a bit 
>> better why macros are important.
> 
> They are in all languages, because you cannot inline them in the general 
> case (when they are used as first-class values).

I haven't heard that this is an issue for Haskell.
Well, Haskell's runtime system is a term rewriter, that changes a lot of 
standard rules. In particular, every reduction step inlines and 
evaluates a closure, so closure inlining isn't an optimization, it's the 
normal evaluation machinery.
(The whole thing is even fast enough to compete with imperative 
languages like C. I assume that's because every reduction step, while 
far more heavyweight than a machine instruction, does "more work" in 
some sense, but I don't know details here.)

>>> Closures, functions, and function
>>> arguments in lazy languages, are in contrast typically opaque: You 
>>> cannot modify them, and you typically cannot even inspect them.
>>
>> Right.
>>
>>> So with macros, you have a clear increase in expressive power.
>>
>> And a definitive decrease of control.
>> A function may ignore a parameter, but it cannot make the parameter do 
>> something subtly different than what the caller expected. A macro could.
> 
> You seem to be very afraid of a construct that others regularly use 
> without significant problems.

... that a self-selected group uses without significant problem.

Yes.

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c4iq6nmd3rcdb@corp.supernews.com>
Joachim Durchholz wrote:
> I wouldn't want to have a mechanism in a language that can tear apart
> functions. That would mean that I can't simply check the properties of a
> function locally, I'd have to check all the places where the function
> might be transformed, too, so I'd need to do whole-system data-flow
> analysis.

Yes, that is a typical abuse of Lisp macros, as Pascal just showed.

> (Lisp: all the power, none of the control.

I'm not sure about the "all the power" bit. ;-)

>> They are in all languages, because you cannot inline them in the general
>> case (when they are used as first-class values).
> 
> I haven't heard that this is an issue for Haskell.
> Well, Haskell's runtime system is a term rewriter, that changes a lot of
> standard rules. In particular, every reduction step inlines and
> evaluates a closure, so closure inlining isn't an optimization, it's the
> normal evaluation machinery.
> (The whole thing is even fast enough to compete with imperative
> languages like C. I assume that's because every reduction step, while
> far more heavyweight than a machine instruction, does "more work" in
> some sense, but I don't know details here.)

Haskell remains 3x slower than C++, OCaml and Stalin-compiled Scheme on the
ray tracer benchmark:

  http://www.ffconsultancy.com/languages/ray_tracer/results.html

I believe that is representative of many real world problems. Unfortunately,
decent benchmarks are few and far between.

Haskell looks very good on the shootout because most of the shootout's
benchmarks boil down to no-ops (binary trees, chameneos, concurrency) or
compute constants (like pi, primes, magic squares, n-body, mandelbrot,
recursive, startup, fasta). That leaves five irreducible benchmarks:

  fannkuch:           Haskell 3.4x slower than C
  k-nucleotide:       Haskell 3.3x slower than C
  regex-dna:          Haskell fails 
  reverse-complement: Haskell 3.7x slower than C
  spectral-norm:      Haskell 17.7x slower than C

So Haskell is >3x slower on all of them. It appears that my ray tracer
actually shows Haskell in a good light...

Here are some interesting observations:

1. Every Haskell implementation uses eager evaluation.
2. Some Haskell implementations are unsafe.
3. Some use low-level bit hackery (e.g. fannkuch).
4. Variance of Haskell's performance results is huge, with several
alternative implementations being orders of magnitude slower than C.

Compare this with OCaml:

  ray tracer:         OCaml 9% slower than C++
  fannkuch:           OCaml 56% slower than C
  k-nucleotide:       OCaml 95% slower than C
  regex-dna:          OCaml 9% slower than C
  reverse-complement: OCaml 81% slower than C
  spectral-norm:      OCaml 5% slower than C

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9tjdt$erv$1@online.de>
Jon Harrop schrieb:
> 
> Haskell remains 3x slower than C++, OCaml and Stalin-compiled Scheme on the
> ray tracer benchmark:
> 
>   http://www.ffconsultancy.com/languages/ray_tracer/results.html
> 
> I believe that is representative of many real world problems.

It may be representative of number-crunching software. I'd expect 
different numbers for webspace servers. I *know* that the ICFP results 
differ (achieving correctness, then speed under rapid prototyping 
conditions).

> Haskell looks very good on the shootout

I wasn't looking at the shootout :-)

> Compare this with OCaml:
> 
>   ray tracer:         OCaml 9% slower than C++
>   fannkuch:           OCaml 56% slower than C
>   k-nucleotide:       OCaml 95% slower than C
>   regex-dna:          OCaml 9% slower than C
>   reverse-complement: OCaml 81% slower than C
>   spectral-norm:      OCaml 5% slower than C

I'm surprised. I'd have thought that OCaml would be roughly the same 
speed as C/C++.

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c4niv7qoomgde@corp.supernews.com>
Joachim Durchholz wrote:
> Jon Harrop schrieb:
>>   http://www.ffconsultancy.com/languages/ray_tracer/results.html
>> 
>> I believe that is representative of many real world problems.
> 
> It may be representative of number-crunching software. I'd expect
> different numbers for webspace servers. I *know* that the ICFP results
> differ (achieving correctness, then speed under rapid prototyping
> conditions).

I haven't looked at the ICFP in detail. For what kinds of problems does
Haskell prevail?

>> Compare this with OCaml:
>> 
>>   ray tracer:         OCaml 9% slower than C++
>>   fannkuch:           OCaml 56% slower than C
>>   k-nucleotide:       OCaml 95% slower than C
>>   regex-dna:          OCaml 9% slower than C
>>   reverse-complement: OCaml 81% slower than C
>>   spectral-norm:      OCaml 5% slower than C
> 
> I'm surprised. I'd have thought that OCaml would be rougly the same
> speed as C/C++.

Shootout benchmarks must be implementable in a tiny amount of C, so it
doesn't showcase most problems where functional programming languages
excel.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9vdti$e6h$1@online.de>
Jon Harrop schrieb:
> Joachim Durchholz wrote:
>> Jon Harrop schrieb:
>>>   http://www.ffconsultancy.com/languages/ray_tracer/results.html
>>>
>>> I believe that is representative of many real world problems.
>> It may be representative of number-crunching software. I'd expect
>> different numbers for webspace servers. I *know* that the ICFP results
>> differ (achieving correctness, then speed under rapid prototyping
>> conditions).
> 
> I haven't look at the ICFP in detail. For what kinds of problems does
> Haskell prevail?

There was no clear pattern, it seemed largely independent of the kind of 
problem.
Probably the data sample is too small to decide from the outside.

Looking at the developer blogs can be more interesting.
In many cases, OCaml and Haskell developers reported that they had the 
tasks done after 40-70% of allotted time, and started with algorithmic 
optimization afterwards. I guess which of the teams wins is then a 
question of which optimization turned out to be how useful.
C/C++ teams usually report about chasing pointer bugs and similar 
problems at the same time.
Unfortunately, I haven't yet seen a Lisper blog on his involvement with 
ICFP.

Regards,
Jo
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural           language Minim
Date: 
Message-ID: <5ijqm5F3pk2m2U1@mid.individual.net>
Joachim Durchholz wrote:
> Pascal Costanza schrieb:
>> Joachim Durchholz wrote:
>>> Any code fragment that is an expression also is a function. The open 
>>> variables become the parameters, the expression's result the 
>>> function's result, and (if the language is impure) the expression's 
>>> side effects become the function's side effects.
>>>
>>> Technically, there may be differences - but they don't buy any 
>>> expressivity.
>>
>> Yes, they do. You can't tear apart a function and reconstruct it to 
>> potentially something different or additional because functions are 
>> typically opaque - the only thing you can do with functions is calling 
>> them.
> 
> I wouldn't want to have a mechanism in a language that can tear apart 
> functions. That would mean that I can't simply check the properties of a 
> function locally, I'd have to check all the places where the function 
> might be transformed, too, so I'd need to do whole-system data-flow 
> analysis.

No, macros are strictly local transformations.

> (Lisp: all the power, none of the control. Programmer discipline 
> required to avoid disaster - the power helps to make the discipline 
> easy, of course, but a single bad day of a single programmer can still 
> ruin the project. No thanks, Sir.)

Yes, Lisp is for good programmers, not for average ones.

>>>> What is good about syntactic abstractions, as provided by macros, is 
>>>> that you can change the implementation underneath without changing 
>>>> the call sites.
>>>
>>> That's standard issue for any function, too.
>>
>> Here are two different implementations for 'while:
>>
>> (defun while-fun (predicate thunk)
>>   (when (funcall predicate)
>>     (funcall thunk)
>>     (while-fun predicate thunk)))
>>
>> (defmacro while1 (test &body body)
>>   `(while-fun (lambda () ,test)
>>      (lambda () ,@body)))
>>
>> (defmacro while2 (test &body body)
>>   `(tagbody
>>      :test (unless ,test (go :end))
>>            ,@body
>>            (go :test)
>>      :end nil))
>>
>> So consider the following test case:
>>
>> (while (< i n)
>>   (setq i (+ i 1)))
>>
>> With 'while1, the macro expansion looks like this:
>>
>> (while-fun (lambda () (< i n))
>>   (lambda () (setq i (+ i 1))))
>>
>>
>> With 'while2, it looks like that:
>>
>> (tagbody
>>   :test (unless (< i n) (go :end))
>>         (setq i (+ i 1))
>>         (go :test)
>>   :end  nil)
>>
>> So the while1 uses a functional abstraction internally, and while2 
>> uses a direct imperative implementation. However, in both cases, the 
>> call site looks exactly the same.
>>
>> How do you achieve the same thing with only functional abstractions?
> 
> I'm not sure what point you're trying to make here.

There is a change in implementation strategy in 'while2: There are no 
closures involved at all, but the code fragments are arranged in a way 
that evaluation still happens in a controlled way.

Abstractions are about hiding implementation details. Functional 
abstractions cannot hide some of the implementation details in this 
specific example. That's my point.

I am not concerned about implementing a proper while construct. That's, 
of course, silly. However, this example is a compact illustration of 
what you can do with macros that is simply not possible with functions 
only. This pays off in more involved examples, because you can design 
your API in the most user-friendly way, without leaking _any_ 
implementation details.

>> You seem to be very afraid of a construct that others regularly use 
>> without significant problems.
> 
> ... that a self-selected group uses without significant problem.
> 
> Yes.

Good enough for me.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural             language Minim
Date: 
Message-ID: <fa2fp3$e7j$3@online.de>
Pascal Costanza schrieb:
> Joachim Durchholz wrote:
>> Pascal Costanza schrieb:
>>> Joachim Durchholz wrote:
>>>> Any code fragment that is an expression also is a function. The open 
>>>> variables become the parameters, the expression's result the 
>>>> function's result, and (if the language is impure) the expression's 
>>>> side effects become the function's side effects.
>>>>
>>>> Technically, there may be differences - but they don't buy any 
>>>> expressivity.
>>>
>>> Yes, they do. You can't tear apart a function and reconstruct it to 
>>> potentially something different or additional because functions are 
>>> typically opaque - the only thing you can do with functions is 
>>> calling them.
>>
>> I wouldn't want to have a mechanism in a language that can tear apart 
>> functions. That would mean that I can't simply check the properties of 
>> a function locally, I'd have to check all the places where the 
>> function might be transformed, too, so I'd need to do whole-system 
>> data-flow analysis.
> 
> No, macros are strictly local transformations.

The macro can inspect and modify its parameters.

A parameter might be a closure that was constructed somewhere else in 
the system.

So the macro can modify what some other part of the system has 
constructed as a piece of code.

>> (Lisp: all the power, none of the control. Programmer discipline 
>> required to avoid disaster - the power helps to make the discipline 
>> easy, of course, but a single bad day of a single programmer can still 
>> ruin the project. No thanks, Sir.)
> 
> Yes, Lisp is for good programmers, not for average ones.

I have bad news for you: roughly 50% of programmers are even below average.

[Sorry, not enough time to reply to the second part. I'll have to drop 
the ball here.]

Regards,
Jo
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-239211.23424016082007@news-europe.giganews.com>
In article <············@online.de>,
 Joachim Durchholz <··@durchholz.org> wrote:

> Pascal Costanza schrieb:
> > Joachim Durchholz wrote:
> >> Pascal Costanza schrieb:
> >>> Joachim Durchholz wrote:
> >>>> Any code fragment that is an expression also is a function. The open 
> >>>> variables become the parameters, the expression's result the 
> >>>> function's result, and (if the language is impure) the expression's 
> >>>> side effects become the function's side effects.
> >>>>
> >>>> Technically, there may be differences - but they don't buy any 
> >>>> expressivity.
> >>>
> >>> Yes, they do. You can't tear apart a function and reconstruct it to 
> >>> potentially something different or additional because functions are 
> >>> typically opaque - the only thing you can do with functions is 
> >>> calling them.
> >>
> >> I wouldn't want to have a mechanism in a language that can tear apart 
> >> functions. That would mean that I can't simply check the properties of 
> >> a function locally, I'd have to check all the places where the 
> >> function might be transformed, too, so I'd need to do whole-system 
> >> data-flow analysis.
> > 
> > No, macros are strictly local transformations.
> 
> The macro can inspect and modify its parameters.
> 
> A parameter might be a closure that was constructed somewhere else in 
> the system.

Na. Macros are running at compile time. The closure is something
at runtime - the macro is long gone then.

> So the macro can modify what some other part of the system has 
> constructed as a piece of code.

A closure is not a piece of source code. It is a data structure
that in Lisp cannot be inspected (only with low-level
implementation dependent functions).

> 
> >> (Lisp: all the power, none of the control. Programmer discipline 
> >> required to avoid disaster - the power helps to make the discipline 
> >> easy, of course, but a single bad day of a single programmer can still 
> >> ruin the project. No thanks, Sir.)
> > 
> > Yes, Lisp is for good programmers, not for average ones.
> 
> I have bad news for you: roughly 50% of programmers are even below average.

None of the 50% will ever understand Common Lisp. From the remaining
50% only 4% may understand Haskell.

> 
> [Sorry, not enough time to reply to the second part. I'll have to drop 
> the ball here.]
> 
> Regards,
> Jo

-- 
http://lispm.dyndns.org
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural               language Minim
Date: 
Message-ID: <5ijuorF3p1sggU1@mid.individual.net>
Joachim Durchholz wrote:
> Pascal Costanza schrieb:
>> Joachim Durchholz wrote:
>>> Pascal Costanza schrieb:
>>>> Joachim Durchholz wrote:
>>>>> Any code fragment that is an expression also is a function. The 
>>>>> open variables become the parameters, the expression's result the 
>>>>> function's result, and (if the language is impure) the expression's 
>>>>> side effects become the function's side effects.
>>>>>
>>>>> Technically, there may be differences - but they don't buy any 
>>>>> expressivity.
>>>>
>>>> Yes, they do. You can't tear apart a function and reconstruct it to 
>>>> potentially something different or additional because functions are 
>>>> typically opaque - the only thing you can do with functions is 
>>>> calling them.
>>>
>>> I wouldn't want to have a mechanism in a language that can tear apart 
>>> functions. That would mean that I can't simply check the properties 
>>> of a function locally, I'd have to check all the places where the 
>>> function might be transformed, too, so I'd need to do whole-system 
>>> data-flow analysis.
>>
>> No, macros are strictly local transformations.
> 
> The macro can inspect and modify its parameters.
> 
> A parameter might be a closure that was constructed somewhere else in 
> the system.
> 
> So the macro can modify what some other part of the system has 
> constructed as a piece of code.

No, that's incorrect.

Assume m is a macro. See the following two invocations of m:

(m (lambda (x) x))

(let ((f (lambda (x) x)))
   (m f))

In the first case, the macro function associated with m will indeed see 
the s-expression '(lambda (x) x) as its parameter. [Note that it's not a 
closure yet, it's just a list with three elements 'lambda, '(x) and 'x!]

In the second case, the macro function will only see 'f as a parameter. 
Since macros are strictly local transformations, there is no way for m 
to inspect the actual binding for f. Bindings only exist at runtime, but 
macros must be able to operate at compile time, when there are no 
bindings yet.

So no: A macro cannot modify arbitrary code from "some" part of a system.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187021865.038861.132870@m37g2000prh.googlegroups.com>
On Aug 10, 4:24 pm, Pascal Costanza <····@p-cos.net> wrote:
> Macros are not about making syntax smooth, they are about hiding
> implementation details.

If the syntax isn't smooth, folks won't use an abstraction that hides
the implementation details - they'll write them out every time.
From: Dan Bensen
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f9k2jr$nml$1@wildfire.prairienet.org>
 > Rainer Joswig schrieb:
 >> Macros are source transforming functions. They can generate
 >> arbitrary source. The result of the source transformation
 >> (new source) runs at runtime, just as the function would.
 >> So the macro expansion is an additional step.

Joachim Durchholz wrote:
 > Yes, I know that, but what does it buy me?

* Someone else mentioned that macros might be useful inside monads.

* Since macro code is inlined, one thing you gain over a runtime HOF
is speed, at the cost of size.  Unless there's a way to inline a HOF.

* How would you implement code that generates code according to
expressions in the client code without building it into the
language?  For instance, an extensible pattern matcher.

* How would you implement anaphoric variables?

 > WHEN, UNLESS, DOLIST and DOTIMES could all be easily written as
 > higher-order functions.
 > To make that smooth, you'd need as little syntactic overhead as
 > possible. Haskell's syntax is minimal enough for that IMHO.

I think the lack of any extra syntax at all is one of the draws of
macros.  A macro looks like it's built in, and I think many Lispers
like that.  If you include return expressions in the DO<*> forms,
then each of the forms you listed would require two lambdas in a HOF.
Haskell doesn't allow impure actions, *ML uses "fun" instead of the
backslash, and they both require an arrow, so there's no current
language that does what Lisp macros do without at least a small amount
of extra syntactic weight.

-- 
Dan
www.prairienet.org/~dsb/
From: Stephen J. Bevan
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87zm0yjauq.fsf@dnsalias.com>
Dan Bensen <··········@cyberspace.net> writes:
> I think the lack of any extra syntax at all is one of the draws of
> macros.  A macro looks like it's built in, and I think many Lispers
> like that.  If you include return expressions in the DO<*> forms,
> then each of the forms you listed would require two lambdas in a HOF.
> Haskell doesn't allow impure actions, *ML uses "fun" instead of the
> backslash, and they both require an arrow, so there's no current
> language that does what Lisp macros do without at least a small amount
> of extra syntactic weight.

I agree.  The question is whether the appropriately chosen "extra
syntactic weight" is onerous and/or an impediment.  If the syntax is
"(lambda () ...)" or "function () -> ..." then it is not hard to argue
that it is onerous.  However, there are better notations.  One of the
best being that used in Smalltalk for blocks: "[...]".  If the "extra
weight" consists only of an opening and closing bracket it would be
somewhat ironic for a Lisper to complain about it :-)
From: Dan Bensen
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f9m3pf$gb4$1@wildfire.prairienet.org>
 > Dan Bensen <··········@cyberspace.net> writes:
 >> I think the lack of any extra syntax at all is one of the draws of
 >> macros.  A macro looks like it's built in, and I think many Lispers
 >> like that.  ... so there's no current language that does what Lisp
 >> macros do without at least a small amount of extra syntactic weight.

Stephen J. Bevan wrote:
 > I agree.  The question is whether the appropriately chosen "extra
 > syntactic weight" is onerous and/or an impediment.  ...  If the "extra
 > weight" consists only of an opening and closing bracket it would be
 > somewhat ironic for a Lisper to complain about it :-)

Right.  I think a lot of disagreements occur because people view the
tradeoff in their own language.  Lispers have to keep typing "lambda"
and "funcall" to implement HOFs, but don't have to type anything extra
at all to call a macro.  Haskellers, on the other hand, type a backslash
and an arrow to call a HOF, while macros with all that syntax would be
much harder.

-- 
Dan
www.prairienet.org/~dsb/
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bsdn01ke39ef7@corp.supernews.com>
Dan Bensen wrote:
> * Since macro code is inlined, one thing you gain over a runtime HOF
> is speed, at the cost of size.  Unless there's a way to inline a HOF.

Inlining often slows functional-style code down.

> * How would you implement code that generates code according to
> expressions in the client code without building it into the
> language?  For instance, an extensible pattern matcher.

You would write an interpreter. Here is some example code implementing part
of Mathematica's pattern matcher, for example:

and pmatches dvs args = match dvs with
  | [] -> raise Not_found
  | (vars, body)::t ->
      let undo = ref(fun () -> ()) in
      try
        let undo = Seq.fold_left2 (Seq.fold_left2 pmatch) undo vars args in
        try_finally rewrite body !undo
      with _ ->
        (!undo)();
        pmatches t args

and pmatch undo pat expr = match pat, expr with
  | `Seq{h=`Blank; t=[||]}, _ -> undo
  | `Seq{h=`Pattern; t=[|`Sym x; `Seq{h=`Blank; t=[||]}|]}, _ ->
      where undo x x.value (Some expr)
  | `Seq{h=ph; t=pt}, `Seq{h=eh; t=et} ->
      Seq.fold_left2 pmatch (pmatch undo ph eh) pt et
  | x, y when x =: y -> undo
  | _ -> (!undo)(); raise Not_found

> * How would you implement anaphoric variables?

You would have to write them explicitly.

>  > WHEN, UNLESS, DOLIST and DOTIMES could all be easily written as
>  > higher-order functions.
>  > To make that smooth, you'd need as little syntactic overhead as
>  > possible. Haskell's syntax is minimal enough for that IMHO.
> 
> I think the lack of any extra syntax at all is one of the draws of
> macros.  A macro looks like it's built in, and I think many Lispers
> like that.  If you include return expressions in the DO<*> forms,
> then each of the forms you listed would require two lambdas in a HOF.

Can you elaborate on this? Doesn't dotimes require only one lambda, for
example?

> Haskell doesn't allow impure actions, *ML uses "fun" instead of the
> backslash, and they both require an arrow, so there's no current
> language that does what Lisp macros do without at least a small amount
> of extra syntactic weight.

You're assuming that the body is written in-line as an anonymous function.
Currying often has the opposite effect: making the HOF approach more
concise.

For example:

  (dotimes (j 100) (test j))

vs:

  dotimes 100 test

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Frank Buss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1x3rmfaorgu50.1ee9qolr6j4qb.dlg@40tude.net>
Joachim Durchholz wrote:

> To make that smooth, you'd need as little syntactic overhead as 
> possible. Haskell's syntax is minimal enough for that IMHO.
> 
> I'm not sure how much of that argument transfers to more complicated macros.

Looks like the Haskell syntax is not good enough, because there is Template
Haskell and doesn't look like it is invented by people who don't know how
to write it with higher order functions, because there are functions in the
Haskell List package like this:

  -- | The 'zip4' function takes four lists and returns a list of
  -- quadruples, analogous to 'zip'.
  zip4			:: [a] -> [b] -> [c] -> [d] -> [(a,b,c,d)]
  zip4			=  zipWith4 (,,,)

  -- | The 'zip5' function takes five lists and returns a list of
  -- five-tuples, analogous to 'zip'.
  zip5			:: [a] -> [b] -> [c] -> [d] -> [e] -> [(a,b,c,d,e)]
  zip5			=  zipWith5 (,,,,)

  -- | The 'zip6' function takes six lists and returns a list of
  --   six-tuples, analogous to 'zip'.
  zip6			:: [a] -> [b] -> [c] -> [d] -> [e] -> [f] -> 
                                [(a,b,c,d,e,f)]
  zip6			=  zipWith6 (,,,,,)

I think this is a good example how macros could be used, because it could
be written with nested lists, but I assume it would not be not as fast as
the zip3, zip4 etc. versions.

Lets try it in Lisp. The Haskell functions are working like this:

  Prelude> zip [1,2,3] [22,33,44]
  [(1,22),(2,33),(3,44)]

  Prelude> zip3 [1,2,3] [22,33,44] [55,66,77,88]
  [(1,22,55),(2,33,66),(3,44,77)]

A 2 parameter zip could look like this in Lisp:

  (defun zip (list0 list1)
    (loop for i0 in list0
          for i1 in list1
          collect (list i0 i1)))

  CL-USER > (zip '(1 2 3) '(55 66 77 88))
  ((1 55) (2 66) (3 77))

And the general macro could look like this:

  (defun variable-generator (i)
    (intern (format nil "I~a" i)))

  (defun for-generator (lists)
    (reduce #'nconc 
            (loop for list in lists
                  for i from 0
                  collect `(for ,(variable-generator i) in ,list))))

  (defun list-generator (count)
    (loop for i below count collect (variable-generator i)))

  (defmacro zip (&rest lists)
    `(loop ,@(for-generator lists)
           collect (list ,@(list-generator (length lists)))))

Now you can write code like this:

  CL-USER > (zip '(1 2 3) '(22 33 44) '(55 66 77 88))
  ((1 22 55) (2 33 66) (3 44 77))

The macro creates the right code for the supplied number of arguments:

  CL-USER > (macroexpand-1 '(zip '(1 2 3) '(22 33 44) '(55 66 77 88)))
    (LOOP FOR I0 IN (QUOTE (1 2 3))
          FOR I1 IN (QUOTE (22 33 44))
          FOR I2 IN (QUOTE (55 66 77 88))
          COLLECT (LIST I0 I1 I2))

I think the idea of generating code by executing code of the same program
is a very general idea and could be used for other languages as well, but
integrating it in Haskell could be difficult, because of the rich syntax.
If you have just trees of code, which transforms and generates other trees
of code, like in Lisp, it is much easier to use it.

-- 
Frank Buss, ··@frank-buss.de
http://www.frank-buss.de, http://www.it4-systems.de
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bqotvquueci3e@corp.supernews.com>
Frank Buss wrote:
> Looks like the Haskell syntax is not good enough, because there is
> Template Haskell...

If that were true, people would have migrated to Template Haskell.

OCaml is even stronger evidence: it already bundles macros yet they remain
largely unused. You cannot validly conclude that OCaml's syntax is
insufficient because there is a macro system any more than you can for
Haskell.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Frank Buss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1ezn5csa8j2am$.1fabrca1ekyi5$.dlg@40tude.net>
Jon Harrop wrote:

> Frank Buss wrote:
>> Looks like the Haskell syntax is not good enough, because there is
>> Template Haskell...
> 
> If that were true, people would have migrated to Template Haskell.

And maybe some people have migrated. But the reason why I wrote, that the
syntax is not good enough, was the existence of functions like zip3, zip4,
zip5 etc. in standard Haskell modules and the way how they are implemented. 

But using macros for it could be difficult, because in Haskell every
function has a fixed number of arguments (with the very interesting
exception of Haskell's printf implementation) and if you provide less
arguments, the function is curried, which is very useful, too. Maybe a
"zipn n list_1 list_2 list_3 ... listn" would be possible with the ideas of
the printf implementation? I'm sure at Haskell Cafe they'll know how to
write it :-)

> OCaml is even stronger evidence: it already bundles macros yet they remain
> largely unused. You cannot validly conclude that OCaml's syntax is
> insufficient because there is a macro system any more than you can for
> Haskell.

I didn't write anything about OCaml, because I don't know the language, but
do you mean camlp4o? From the examples at this page:

http://caml.inria.fr/pub/old_caml_site/camlp4/tutorial/tutorial007.html

it looks like it is not as powerful as Lisp macros, because it is not part
of the language and looks like it is called like a pre-processor. Is it
possible to call normal OCaml functions from the definitions of camlp4o for
producing the resulting code? How would a "zip" macro look like in camlp4o?

-- 
Frank Buss, ··@frank-buss.de
http://www.frank-buss.de, http://www.it4-systems.de
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xwsw2lbj8.fsf@ruckus.brouhaha.com>
Frank Buss <··@frank-buss.de> writes:
> And maybe some people have migrated. But the reason why I wrote, that the
> syntax is not good enough, was the existence of functions like zip3, zip4,
> zip5 etc. in standard Haskell modules and the way how they are implemented. 

Right, it's boilerplate and there are a few other function families
like that, but not that many.  Somewhat worse is the existence of
monadic and non-monadic versions of the same function (map, mapM,
mapM_, etc).  Even worse is the way monads don't compose, so you need
handcrafted monad transformers written monad-by-monad.  But macros
won't help with most of these.

There is also Liskell, if Lisp is more to your liking than Template Haskell.

> But using macros for it could be difficult, because in Haskell every
> function has a fixed number of arguments 

Every Haskell function takes one argument and returns one value:

   printf :: PrintfType r => String -> r

printf takes a format string and returns a HOF that you then apply to
the first "arg", getting another HOF, etc.  It does crazy stuff behind
the scenes.  I suppose it could be addressed with a powerful enough
macro system to parse the format string.  Or you could do it with
fancier types.  It's the motivating example for Cayenne, it seems to
me.
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186836622.662744.286030@b79g2000hse.googlegroups.com>
On 11 Aug., 10:04, Frank Buss <····@frank-buss.de> wrote:
> Jon Harrop wrote:
> > Frank Buss wrote:
> >> Looks like the Haskell syntax is not good enough, because there is
> >> Template Haskell...
>
> > If that were true, people would have migrated to Template Haskell.
>
> And maybe some people have migrated. But the reason why I wrote, that the
> syntax is not good enough, was the existence of functions like zip3, zip4,
> zip5 etc. in standard Haskell modules and the way how they are implemented.

Not quite.
See http://citeseer.ist.psu.edu/238836.html "An n-ary zipWith in
Haskell".
Yes, it's downloadable.

Grüße,
Ingo
From: Lauri Alanko
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9jrpr$dh9$1@oravannahka.helsinki.fi>
In article <·································@40tude.net>,
Frank Buss  <··@frank-buss.de> wrote:
> But using macros for it could be difficult, because in Haskell every
> function has a fixed number of arguments (with the very interesting
> exception of Haskell's printf implementation) and if you provide less
> arguments, the function is curried, which is very useful, too. Maybe a
> "zipn n list_1 list_2 list_3 ... listn" would be possible with the ideas of
> the printf implemenation? I'm sure at Haskell Cafe they'll know how to
> write it :-)

It's even simpler:

(<*>) = zipWith ($)
f <$> x = repeat f <*> x

t1 = (,) <$> ["foo", "bar"] <*> ["FOO", "BAR"]
t2 = (,,) <$> [1,2] <*> ['a', 'b'] <*> [True, False]

> t1
[("foo","FOO"),("bar","BAR")]

> t2
[(1,'a',True),(2,'b',False)]


This is really an instance of the Control.Applicative structure, but
the standard list type already has a different instance (for cartesian
products).


Lauri
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13br2517s09nt60@corp.supernews.com>
Frank Buss wrote:
>> OCaml is even stronger evidence: it already bundles macros yet they
>> remain largely unused. You cannot validly conclude that OCaml's syntax is
>> insufficient because there is a macro system any more than you can for
>> Haskell.
> 
> I didn't wrote anything about OCaml, because I don't know the language,
> but do you mean camlp4o?

Camlp4, yes.

> From the examples at this page: 
> 
> http://caml.inria.fr/pub/old_caml_site/camlp4/tutorial/tutorial007.html
> 
> it looks like it is not as powerful as Lisp macros,

Camlp4 provides extensible lexers and parsers and supports arbitrary LL
grammars and handles associativies, precedences and pretty printing.

For example, the following extends OCaml's pattern matching capabilities to
support two infix operators in patterns:

  EXTEND Gram
    patt: BEFORE "simple"
    [ "sum"
        [ f = patt; "+:"; g = patt -> <:patt< Add($f$, $g$) >> ]
    | "product"
        [ f = patt; "*:"; g = patt -> <:patt< Mul($f$, $g$) >> ]
    ]
    ;
  END

> because it is not part of the language

Camlp4 has been in the core OCaml distribution for many years.

> and looks like it is called like a pre-processor.

It can be called as a preprocessor (to translate between languages or
replace the front-end of the compiler) or used in-line like Lisp macros.

> Is it possible to call normal OCaml functions from the definitions of
> camlp4o for producing the resulting code? 

Yes, you use ordinary OCaml functions to rewrite abstract syntax trees just
as you might in Lisp.

> How would a "zip" macro look like in camlp4o?

Something like this can be used to generate zipn functions for a given "n":

let pvar v n = Ast.PaId(_loc, Ast.IdLid (_loc, v^string_of_int n));;

let var v n = Ast.ExId(_loc, Ast.IdLid (_loc, v^string_of_int n));;

let rec ptup f = function
  | 1 -> f 1
  | n -> Ast.PaCom(_loc, f n, ptup f (n-1));;

let rec tup f = function
  | 1 -> f 1
  | n -> Ast.ExCom(_loc, f n, tup f (n-1));;

let zip n =
  let p n = <:patt< $pvar "h" n$::$pvar "t" n$ >> in
  let tup f n = Ast.ExTup(_loc, tup f n) in
  <:str_item<
    let rec $pvar "zip" n$ = function
      | $Ast.PaTup(_loc, ptup p n)$ ->
          $tup (var "h") n$ :: $var "zip" n$ $tup (var "t") n$
      | _ -> [] >>;;

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9dg58$r7o$1@registered.motzarella.org>
Joachim Durchholz schrieb:

> Now under a macro regime, abstracting stuff out requires defining the 
> macro and writing calls to it, so this aspect is still the same.
> 
> Where do macros *differ* from HOFs?

I think you should really spend time to read in just one weekend the
first 9 chapters of Practical Common Lisp:
http://www.gigamonkeys.com/book/

The third one is about getting rough ideas about Lisp programming.
Seibel is doing lots of stuff in there without having it explained
(explanations follow implicitly later).
Chapters 1-7 will be very easy for you. Read carefully what happens
in 8 and 9. I really suggest to read them in the order from 1 to 9.
After that your understanding about Macros will improve. It also
puts you into the potential position to dive in deeper and really
learn what this is all about.

Imagine a function F in Haskell that takes a string S1 as an argument
and returns a string S2. Now S1 is just a string that contains some
Haskell code. S2 contains some other Haskell code. You can write that
easily - S1 and S2 are strings, nothing more. While in your code you
write the compact S1 what really gets compiled is S2.

The difference is when they run and what they evaluate.
HOFs "run" at "program runtime" while macros run during macro expansion
time which comes before compilation.


André
-- 
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f9h8c2$hdn$1@online.de>
André Thieme schrieb:
> The difference is when they run and what they evaluate.
> HOFs "run" at "program runtime" while macros run during macro expansion
> time which comes before compilation.

Yes, and in a pure language, it doesn't matter when something is run.

No need to beat this dead horse anymore. Evaluation time does not 
influence the semantics of an expression in a side-effect-free ("pure") 
language, so you don't need a mechanism that differentiates here.

Regards,
Jo
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f9il5a$t8b$1@registered.motzarella.org>
Joachim Durchholz schrieb:
> André Thieme schrieb:
>> The difference is when they run and what they evaluate.
>> HOFs "run" at "program runtime" while macros run during macro expansion
>> time which comes before compilation.
> 
> Yes, and in a pure language, it doesn't matter when something is run.
> 
> No need to beat this dead horse anymore. Evaluation time does not 
> influence the semantics of an expression in a side-effect-free ("pure") 
> language, so you don't need a mechanism that differentiates here.


Joachim, there must be a little misunderstanding.
When you say it doesn't matter when things run in a pure language then
it makes no sense to compare it with macros.
It is as if I say: it is not important to compile the code first. Just
run the program, then compile it.
Or: first compile, then run, then write it.
I told you that a difference between macros and HOFs is when they run.
Macros run before potentially all of your code is written. They run
before you compile and run your program.


André
-- 
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186782025.450633.66900@i38g2000prf.googlegroups.com>
> I told you that a difference between macros and HOFs is when they run.
> Macros run before potentially all of your code is written. They run
> before you compile and run your program.

Absolutely. Whether HOF's are equivalent to macros in a pure language
is not an interesting question. Neither Lisp nor ML are pure
languages, and moreover, whether an expression is evaluated at compile-
time or runtime matters in real systems even if both yield the same
result. There are a lot of practical things that can be done with
macros in existing systems that cannot be done with HOFs in existing
systems.
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186148119.020260.135340@b79g2000hse.googlegroups.com>
On Aug 1, 5:33 pm, Joachim Durchholz <····@durchholz.org> wrote:
> Slobodan Blazeski schrieb:
>
>
>
> > Macros are just a shorthand, everything you could do with them you
> > could do without them.
>
> Sure, just as with any construct of any programming language.
>
>  > So you *could* write your DSL without macros
>
> > like (calculate-approximate-military-budget 2007) but they could save
> > you from writing a lot of boileplate code and help you catch a lot of
> > patterns. Macros  could make your code more concise,  they could
> > generate functions that elsewhere you *must* write by hand, becoming a
> > so-called human compiler.
>
> How does that differ from simply wrapping stuff in a function?

Because you're referring to the code not to the specific
functionality, you see a template in your mind that's repeating on and
on so you use a macro to catch that model.  Macros are embedded
compilers, their result could be any valid lisp expression, function,
CLOS object, creation of new global variable, anything that lisp
implementation holds valid. You can do that with function because
functions always evaluate their arguments, macros control the
evaluation of their arguments. You could achieve the same without
macros but that wouldn't worth the effort.
>
>  > If there's a tool that could make your life
>
> > easier why not using it. Just look at Paul Graham's book On Lisp
> > freely available athttp://www.paulgraham.com/onlisptext.html .
>
>  > If
>
> > you don't find anything that could help your work becoming more easy
> > than maybe your domain and/or personality is not lisp-friendly.
>
> Well, the positive thing about Lisp was that fiddling around with the
> MAP functions in my first Lisp weeks was an incredible a-ha experience.
> Lisp soon lost all credit when the Lisp systems that I encountered were
> bug-ridden, slow, and the Lisp programs written were difficult to
> understand.
> Much of these problems had to do with shallow binding, and I think that
> fad has gone for good. Another issue was with lack of static typing; I
> understand that you can have static typing in Lisp where you need it, so
> things have improved here, but I doubt that this will work well with,
> say, 3rd-party libraries. (I like to guess what a function does from its
> types; in fact, there's a "theorems for free" school that has
> demonstrated that a surprisingly large portion of a function's semantics
> can be inferred from its types alone if the function is known to be pure.)
> I think that syntactic sugar is very often a sign of inelegant language
> design. Macros are a way of doing syntactic sugar, so heavy reliance on
> macro mechanisms makes me predisposed to concluding that the language
> isn't expressive enough wrt. building abstractions.

Not just syntactic sugar, whole embedded languages instead of just
library of functionality. You could define whole new syntax something
that could help you build on the language just like you build with the
language.
> I also think that while Lisp had a very lean-and-mean syntax initially
> (just S-expressions), it lost most of that by putting all that syntactic
> sugar back in through macros. Sure, it's still all S-expressions, but
> macro names are essentially what keywords are in other languages:
> boilerplate that shouldn't be necessary.

Quite the ordinary they save you from writing boilerplate, beside you
could write lisp with pure functional style (if domain allows you.)
but sometimes men got to do what a men got to do.
>
> Five years ago, I'd have said "well, you can't work without quite some
> amount of boilerplace keywords". Then I got to know Haskell, and now I
> think that boilerplate keywords should be restricted.

I never tried Haskell, maybe someday when I got some time to spare.
>
> Just my 2c, to illustrate the perspective from which I'm eyeing Lisp:
> sympathetic with the general concept, but it seems to have become too
> baroque for my taste. Plus, it has empowered the programmer (lots of
> powerful commands) without empowering the maintainer (few guarantees
> about what a call does, you need to do whole-system analysis to exclude
> that, say, some code you call doesn't overwrite your local variables).
>
> I know much of this isn't relevant to many Lispers, so YMMV :-)
>
> Regards,
> Jo
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f93398$8a4$1@online.de>
Slobodan Blazeski schrieb:
> On Aug 1, 5:33 pm, Joachim Durchholz <····@durchholz.org> wrote:
>> Slobodan Blazeski schrieb:
>>> So you *could* write your DSL without macros
>>> like (calculate-approximate-military-budget 2007) but they could save
>>> you from writing a lot of boileplate code and help you catch a lot of
>>> patterns. Macros  could make your code more concise,  they could
>>> generate functions that elsewhere you *must* write by hand, becoming a
>>> so-called human compiler.
>> How does that differ from simply wrapping stuff in a function?
> 
> Because you're referring to the code not to the specific
> functionality, you see a template in your mind that's repeating on and
> on so you use a macro to catch that model.  Macros are embedded
> compilers, their result could be any valid lisp expression, function,
> CLOS object, creation of new global variable, anything that lisp
> implementation holds valid.

OK.

> You can do that with function because functions always evaluate their

Assuming you meant "You cannot do that..."

> arguments, macros control the evaluation of their arguments.

That's exactly what I have been elaborating on: if an expression is 
pure, it does not matter in the least when exactly it is evaluated. It 
could be evaluated at compile time, during initialization of the 
program, outside of a tight loop, or inside the loop: the result will 
always be the same.

So in a pure language, you don't need macros - at least not for this reason.

Regards,
Jo
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9aoi4$c4e$1@registered.motzarella.org>
Joachim Durchholz schrieb:

> So in a pure language, you don't need macros - at least not for this 
> reason.

Joachim, in Java, Lisp and Prolog you also don't need macros.
And what is a pure language? Is French pure? Or C#?

I suppose you meant "pure" in regards to side effects, as you mentioned
it doesn't matter at what point an expression is calculated, as the
result would always be the same.

What you can do is to write big parts of your code without side effects.
But for that you don't need Lisp or Haskell: ML would do.
Every meaningful program however needs side effects. Even if it is a
very boring one, that takes no inputs, only calculates what it was given
in hard code - it still must do something with the result. Print it on
the screen, save it to a DB, whatever. So, depending on the domain you
work in you could write nearly everything in Lisp without side effects.
Usually you would want side effects from time to time, because it can
allow more clarity in code and sometimes faster execution.
Being forced to program in a specific style, which a language designer
was thinking of for me, would for me personally not be acceptable, but
this is of course only my opinion.

One thing that is correct: for many cases where a Lisper would use a
macro one would solve it in Haskell with lazyness. But I don't understand
your wording here. When you say "Haskell doesn't need macros for this"
then it sounds as if lazyness would be an advantage.
I could say the same: "while Haskellers solve these things with lazyness
a Lisper could do it with a macro instead. We don't need this lazyness".
Funnily a Lisper could do it also with laziness, if she wanted.

So, lazyness is only a subset of what macros can do. Talking about it as
if it were some higher concept makes no sense in my ears (and also not
in my eyes and in my brain).
However, I can agree with something like this:
for a trained Haskeller lazyness is very easy to use. His code results
in clean abstractions like in Lisp for many cases where the Lisper would
have used a macro, which is a heavier and therefore uncleaner beast.
Laziness is more specialized and therefore easier to use for these special
cases, which funnily cover a big part of what Lispers do with macros.


André
-- 
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f9c404$fob$1@online.de>
André Thieme schrieb:
> Joachim Durchholz schrieb:
> 
>> So in a pure language, you don't need macros - at least not for this 
>> reason.
> 
> Joachim, in Java, Lisp and Prolog you also don't need macros.

Oh, I often wished I had macros in Java.

> And what is a pure language? Is French pure? Or C#?

I meant the technical term (as in "free from side effects").
I had assumed that it was clear from the context.

> I suppose you meant "pure" in regards to side effects, as you mentioned
> it doesn't matter at what point an expression is calculated, as the
> result would always be the same.

Well, exactly.

> What you can do is to write big parts of your code without side effects.
> But for that you don't need Lisp or Haskell: ML would do.

By that reasoning, any language would do.

> Every meaningful program however needs side effects. Even if it is a
> very boring one, that takes no inputs, only calculates what it was given
> in hard code - it still must do something with the result.

*sigh*

You're being ignorant.
First, I have explained exactly this question more than once in this 
discussion, but you either missed or ignored it.
Second, the mere existence and practical usage of pure 
(side-effect-free) languages such as Haskell should convince you that 
there are loopholes in your reasoning - so why are you putting it up as 
if you were serious?
Third, I'm not going to explain it yet again. The issue should be a FAQ 
on http://haskell.org; go and read it up there if you're genuinely 
interested.

 > So, depending on the domain you
> work in you could write nearly everything in Lisp without side effects.
> Usually you would want side effects from time to time, because it can
> allow more clarity in code and sometimes faster execution.

Just in a nutshell to keep this post manageable:

If the language is pure, the compiler can optimize very aggressively 
because aliasing is a nonissue.
This means you can employ programming techniques that would be 
prohibitively costly in an impure language.

> Being forced to program in a specific style, which a language designer
> was thinking of for me, would for me personally not be acceptable, but
> this is of course only my opinion.

You're forced into a specific style in Lisp, too. (Otherwise C 
programmers would write C code in Lisp.)

> One thing that is correct: for many cases where a Lisper would use a
> macro one would solve it in Haskell with lazyness.

It's higher-order functions that can replace macros.
Laziness can help, but it's not really necessary.

 > But I don't understand
> your wording here. When you say "Haskell doesn't need macros for this"
> then it sounds as if lazyness would be an advantage.

I'm using Haskell as example language because it has a minimal syntax.
OCaml and SML have "more syntax", and the code that I have seen in these 
languages looks as if writing macro-like HOFs in them isn't as easy.

Regards,
Jo
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f9djek$3kb$1@registered.motzarella.org>
Joachim Durchholz schrieb:
> André Thieme schrieb:

>> Every meaningful program however needs side effects. Even if it is a
>> very boring one, that takes no inputs, only calculates what it was given
>> in hard code - it still must do something with the result.
> 
> *sigh*
> 
> You're being ignorant.

Why?

> First, I have explained exactly this question more than once in this 
> discussion, but you either missed or ignored it.

I missed these posts.


>> Being forced to program in a specific style, which a language designer
>> was thinking of for me, would for me personally not be acceptable, but
>> this is of course only my opinion.
> 
> You're forced into a specific style in Lisp, too. (Otherwise C 
> programmers would write C code in Lisp.)

First of all: my wording was not precise, so you are right.
Of course there is a limited number of "specific styles" which can be
grouped to one "specific style" in which one has to code in Lisp.
I was thinking about the paradigms, like functional programming,
procedural prg., OO prg., logical prg., domain specific prg.

Now to what you also said: you can write C code in Lisp.
Depending on what you mean: manual memory management, pointers, very
low level coding, goto, etc.


>> One thing that is correct: for many cases where a Lisper would use a
>> macro one would solve it in Haskell with lazyness.
> 
> It's higher-order functions that can replace macros.
> Laziness can help, but it's not really necessary.

In some sense I want to agree with you. HOF do the work, but implicit
lazyness makes your solution acceptable.
One could make CASE a HOF and then call it:
(hof-case obj
   14         (lambda () (print "Hahahaha"))
   "xyz"      (lambda () (print "It's a string"))
   'otherwise (lambda () (print "Default branch")))

With implicit lazyness we would have had something like this:
(hof-case obj
   14         (print "Haaaa")
   "abc"      (print "String")
   'otherwise 'default)


> I'm using Haskell as example language because it has a minimal syntax.
> OCaml and SML have "more syntax", and the code that I have seen in these 
> languages looks as if writing macro-like HOFs in them isn't as easy.

Yes. it also looks to me this way. Haskell has some nice concepts, and I
will study it some day, to become a better Lisp programmer.


André
-- 
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bjthij88k0d1e@corp.supernews.com>
Joachim Durchholz wrote:
> So in a pure language, you don't need macros...

I've used macros in OCaml to extend the pattern matcher to support new
sequence types with efficient pattern match compilation and infix operators
in patterns. The former can be done in F# using active patterns.

To the best of my knowledge, this cannot be done in Haskell because it lacks
both macros and views.

I assume by "you don't need macros" you mean something beyond a Turing
argument. Can you explain how one might achieve these things in a pure
language like Haskell without being asymptotically less efficient in the
former case and syntactically less efficient in the latter case?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9hbuh$mel$1@online.de>
Jon Harrop schrieb:
> Joachim Durchholz wrote:
>> So in a pure language, you don't need macros...
> 
> I've used macros in OCaml to extend the pattern matcher to support new
> sequence types with efficient pattern match compilation and infix operators
> in patterns. The former can be done in F# using active patterns.
> 
> To the best of my knowledge, this cannot be done in Haskell because it lacks
> both macros and views.

Ah, right. Pattern matching is one area where Haskell isn't open to 
abstraction.
Views should solve that problem, no macros needed. Of course, that's an 
entirely theoretic assumption, unfalsifiable until somebody writes 
practical code with views.

> I assume by "you don't need macros" you mean something beyond a Turing
> argument.

Right. We're talking expressivity here, not expressibility.

 > Can you explain how one might achieve these things in a pure
> language like Haskell without beyond asymptotically less efficient in the
> former case and syntactically less efficient in the latter case?

I'd say Haskell is already nearly there, with the exception of views.

I haven't properly understood the "bindings" argument. The existence of 
the "do" notation for monads in Haskell is an indicator that macros 
might be useful - or it might be that Haskell's syntax isn't simple 
enough. (There are some irregularities in Haskell's indentation rules, 
some keywords trigger indentations, others don't. I have been assuming 
that it's these irregularities that have forced Haskell into extending 
for the do notation, but then I may be wrong.)

Regards,
Jo
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xir7mye6e.fsf@ruckus.brouhaha.com>
Joachim Durchholz <··@durchholz.org> writes:
> Views should solve that problem, no macros needed. Of course, that's
> an entirely theoretic assumption, unfalsifiable until somebody
> practical code with views arises.

http://hackage.haskell.org/trac/ghc/wiki/ViewPatterns says:

"We are about to begin prototyping this extension in GHC, so speak now
if you have comments or suggestions!"
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9kc7q$bfg$2@online.de>
Paul Rubin schrieb:
> Joachim Durchholz <··@durchholz.org> writes:
>> Views should solve that problem, no macros needed. Of course, that's
>> an entirely theoretic assumption, unfalsifiable until somebody
>> practical code with views arises.
> 
> http://hackage.haskell.org/trac/ghc/wiki/ViewPatterns says:
> 
> "We are about to begin prototyping this extension in GHC, so speak now
> if you have comments or suggestions!"

Aaaah, finally!

(Views were held up by considerations of "better delay it rather than 
implement something half-assed".)

Regards,
jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bqnud97o92a36@corp.supernews.com>
Joachim Durchholz wrote:
> Views should solve that problem, no macros needed. Of course, that's an
> entirely theoretic assumption, unfalsifiable until somebody practical
> code with views arises.

F# already has views and there is an OCaml macro implementing them, BTW.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <16%ri.28080$jN5.917568@phobos.telenet-ops.be>
Joachim Durchholz wrote:
> Gisle Sælensminde schrieb:
>> Jon Harrop <···@ffconsultancy.com> writes:
>>
>>> André Thieme wrote:
>>>
>>> How is Lisp any more programmable than, say, OCaml?
>>
>> One example is lisp macros that allow you to make domain-specific 
>> extansions.
> 
> A set of well-designed set of functions taking and constructing closures 
> is a domain-specific language, too.
> I have seen this technique applied in Haskell, and I see no reason why 
> it wouldn't work in any language with closures and HOFs (including Lisp).
> 
>> I have personally extended my code with macros for lex and yacc like 
>> lexers/parsers
>> inside my code. To my knowledgem this could not be done as easily in 
>> OCaml.
> 
> Use HOFs.
> They will usually run during execution time, so you might have 
> performance differences, but otherwise, the general flexibility should 
> be the same.
> 
>> Now this has not so much to do with dynamic typing of Lisp as it has 
>> with the
>> s-expression syntax.
> 
> I don't think that syntax plays a role for domain-specific sublanguages.
> 
> Sure, it's crucial for macros (you need a very regular syntax to make 
> writing macros easy enough), but you don't need macros for DSLs.
> 
> Regards,
> Jo

I often have a functional interface with HOFs, then add macro wrappers 
so that it is easier to the eye in the client source code. (when it 
makes sense)

That way I get the best of both worlds !

Sacha
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8q7nk$lih$1@online.de>
Sacha schrieb:
> I often have a functional interface with HOFs, then add macro wrappers 
> so that it is easier to the eye in the client source code. (when it 
> makes sense)

Not sure whether that's an argument for or against the Lisp way of doing 
things, though... I've seen some domain-specific languages done in 
Haskell, and they didn't seem "heavy on the eye" to me, even without macros.
I can't tell what exactly the reasons for that might be. Possibly 
because Haskell code uses less syntax than typical modern Lisps (both in 
the sense of "less keywords" and in the sense of "less parentheses", 
i.e. at the token and at the lexical level).

Regards,
Jo
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <Ru2si.28355$05.1175835@phobos.telenet-ops.be>
Joachim Durchholz wrote:
> Sacha schrieb:
>> I often have a functional interface with HOFs, then add macro wrappers 
>> so that it is easier to the eye in the client source code. (when it 
>> makes sense)
> 
> Not sure whether that's an argument for or against the Lisp way of doing 
> things, though... I've seen some domain-specific languages done in 
> Haskell, and they didn't seem "heavy on the eye" to me, even without 
> macros.
> I can't tell what exactly the reasons for that might be. Possibly 
> because Haskell code uses less syntax than typical modern Lisps (both in 
> the sense of "less keywords" and in the sense of "less parentheses", 
> i.e. at the token and at the lexical level).
> 
> Regards,
> Jo

I wasn't really talking about DSLs. Just reacting on your statement 
about HOFs being good enough. I often find they're useful but need a 
little more to ease the syntax.

That's merely (in this case) a problem about defining lambdas, that 
cannot be done without some kind of a syntax. By defining macros I avoid 
all these lambda forms which don't add any value to the meaning conveyed 
by the code (haskell also has syntax for this). They're still there, but 
under the hood.

Sacha
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8qdh7$slt$1@online.de>
Sacha schrieb:
> I wasn't really talking about DSLs. Just reacting on your statement 
> about HOFs being good enough. I often find they're usefull but need a 
> little more to ease the syntax.

Ah, OK.

> That's merely (in this case) a problem about defining lambdas, that 
> cannot be done without some kind of a syntax. By defining macros I avoid 
> all these lambda forms which don't add any value to the meaning conveyed 
> by the code (haskell also has syntax for this). They're still there, but 
> under the hood.

Actually, it goes a bit beyond that. Not only does currying put 90% of 
lambdas "under the hood", it also enables programmers to work with 
functions as normal parts of an expression.

E.g. the traditional way to write a function that adds 1 to its 
parameter is something along the lines of
   ((lambda x) (plus 1 x))
(an AST with three nodes and 5 terminals) but it would be simply
   (plus 1)
(one node, three terminals) in curried code.

The latter is not just just syntactic sugar for the lambda term.
It actively promotes a mode of programming where you mentally 
reconfigure the functions themselves, not the data that they are 
processing ("point-free style" is a closely related term).

Regards,
Jo
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <Kxasi.29464$6B1.997551@phobos.telenet-ops.be>
Joachim Durchholz wrote:

> Actually, it goes a bit beyond that. Not only puts currying 90% of 
> lambdas "under the hood", it also enables programmers to work with 
> functions as normal parts of an expression

Yes I loved that when playing with haskell. (though 90% seems a bit high 
a figure).

Ahwell can't have the cake and eat it too =P

Sacha
From: =?UTF-8?B?QW5kcsOpIFRoaWVtZQ==?=
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8rtfe$jqp$1@registered.motzarella.org>
Joachim Durchholz schrieb:

> Actually, it goes a bit beyond that. Not only puts currying 90% of 
> lambdas "under the hood", it also enables programmers to work with 
> functions as normal parts of an expression

Download lots of lisp sources and search for lambdas. You will find
that a much smaller number of them can be expressed in terms of
currying. The reason for that is that most lisp programs are not
written in purely functional programming style. If they were there
would be a bigger need for currying. But if you actually look at
lots of Lisp code you will see that it will not be a "killer feature".


> E.g. the traditional way to write a function that adds 1 to its 
> parameter is something along the lines of
>   ((lambda x) (plus 1 x))
> (an AST with three nodes and 5 terminals) but it would be simply
>   (plus 1)
> (one node, three terminals) in curried code.

Yes, it looks very cool when your program is only one line long.
It is trivial to add currying to Lisp and I did that and make use of
it. But still, it mostly saves 3 "tokens" or "nodes" or however you
want to call them. So in a typical 28k LOC Lisp program in the end it
will have saved you more or less nothing.

How can one curry the n-th argument away in Haskell? If we have a,
say, 3-ary function RGB which returns a color object.
How can you create a (potentially very) green object? I would say:
[rgb 0 _ 0]

And how can one do multi-currying in Haskell?
[rgb _ 255 _]


> The latter is not just just syntactic sugar for the lambda term.
> It actively promotes a mode of programming where you mentally 
> reconfigure the functions themselves,

To some part I can agree. Currying is really nothing more than
syntactical sugar that will help you to save 3 tokens every
1000 tokens (or even worse when the style is not very functional).
But then again it promotes a more functional programming style.
Currying is one feature that I liked so much in OCaml and Haskell
that I implemented it into Lisp as well.


André
-- 
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural     language Minim
Date: 
Message-ID: <f8sbkr$u3t$1@online.de>
André Thieme schrieb:
> Yes, it looks very cool when your program is only one line long.
> It is trivial to add currying to Lisp and I did that and make use of
> it. But still, it mostly saves 3 "tokens" or "nodes" or however you
> want to call them. So in a typical 28k LOC Lisp program in the end it
> will have saved you more or less nothing.

Haskell library code says otherwise.

Currying isn't very powerful for order-0 functions.
However, for a higher-order function, you can pass through functions 
without caring how many parameters they actually take. I.e.

   twice X fn = fn (X + X)

will work the same whether fn takes one, two, five or a dozen 
parameters. If you insist on spelling out parameters, this is equivalent to

   twice X fn = fn (X + X)
   twice X fn Y1 = fn (X + X) Y1
   twice X fn Y1 Y2 = fn (X + X) Y1 Y2
   twice X fn Y1 Y2 Y3 = fn (X + X) Y1 Y2 Y3
   twice X fn Y1 Y2 Y3 Y4 = fn (X + X) Y1 Y2 Y3 Y4
   etc. etc. etc.

(Oh, and it doesn't matter which types those Y1... have. The only type 
that the above code nails down is that X must be of a numeric type, 
since it's used with addition.)

> How can one curry the n-th argument away in Haskell? If we have a,
> say, 3-ary function RGB which returns a color object.
> How can you create a (potentially very) green object? I would say:
> [rgb 0 _ 0]

Not worth the syntactical overhead. This would be written uncurried:

   green X = rgb 0 X 0

One could write this as

   swap12 f a b = f b a
   green = swap12 rgb 0 0

The latter line is equivalent to

   green = (swap12 rgb) 0 0

> And how can one do multi-currying in Haskell?
> [rgb _ 255 _]

Currying always handles all rest parameters, so this would be

   saturategreen = swap rgb 255

The uncurried meaning of saturategreen can be inferred like this:

   saturategreen R B
     = (swap rgb 255) R B -- replace saturategreen with its definition
     = (\x swap rgb 255 x) R B -- "lambdify" first param for next step
     = (\x rgb x 255) R B -- replace swap with its definition
     = (rgb R 255) B -- (rgb R 255) is a function of one parameter
     = rgb R 255 B

Note that this "substitute in the function bodies" technique (a.k.a. 
"equational reasoning") is the semantics of function evaluation in 
Haskell, so the above accurately models what's happening.

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b1b496$0$1635$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> > It is trivial to add currying to Lisp
>
> Haskell library code says otherwise.
> 
> Currying isn't very powerful for order-0 functions.

Higher-order functions are prohibitively difficult to write without a static
type system.

This is why currying, higher-order functions, closures, continuations and
all other non-trivial functional techniques are ubiquitous in Haskell, F#
and OCaml but almost unheard of in Lisp/Scheme.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: ······@corporate-world.lisp.de
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186059507.547085.69820@d55g2000hsg.googlegroups.com>
On Aug 2, 12:31 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> Joachim Durchholz wrote:
> > > It is trivial to add currying toLisp
>
> > Haskell library code says otherwise.
>
> > Currying isn't very powerful for order-0 functions.
>
> Higher-order functions are prohibitively difficult to write without a static
> type system.
>
> This is why currying, higher-order functions, closures, continuations and
> all other non-trivial functional techniques are ubiquitous in Haskell, F#
> and OCaml but almost unheard of in Lisp/Scheme.

This is more total bullshit from 'Dr' Harrop.

>
> --
> Dr Jon D Harrop, Flying Frog Consultancy
> OCaml for Scientistshttp://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <vz4pjhprfy.fsf@hod.lan.m-e-leypold.de>
'joswig AT corporate-world DOT lisp DOT de' wrote:

> On Aug 2, 12:31 pm, Jon Harrop <····@ffconsultancy.com> wrote:
>> Joachim Durchholz wrote:
>> > > It is trivial to add currying toLisp
>>
>> > Haskell library code says otherwise.
>>
>> > Currying isn't very powerful for order-0 functions.
>>
>> Higher-order functions are prohibitively difficult to write without a static
>> type system.
>>
>> This is why currying, higher-order functions, closures, continuations and
>> all other non-trivial functional techniques are ubiquitous in Haskell, F#
>> and OCaml but almost unheard of in Lisp/Scheme.
>
> This is more total bullshit from 'Dr' Harrop.

He is, I think, talking about automatic (?) currying, that is

  # let f x y = x * y ;;
  val f : int -> int -> int = <fun>
  # let g = f 10 ;;
  val g : int -> int = <fun>

I imagine this being difficult to implement (in the
compiler/interpreter) without a static type system and type inference,
but YMMV.

Ooops. Didn't I have the resolution not to reply to one of RJ's posts
any more? On the other side the phrasing "more total bullshit"
strikingly demonstrates some of the points I tried to make in my last
replies to him. I know, drivel. 

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b25e64$0$1620$ed2619ec@ptn-nntp-reader02.plus.net>
Markus E.L. 2 wrote:
> He is, I think, talking about automatic (?) currying, that is
> 
>   # let f x y = x * y ;;
>   val f : int -> int -> int = <fun>
>   # let g = f 10 ;;
>   val g : int -> int = <fun>
> 
> I imagine this being difficult to implement (in the
> compiler/interpreter) without a static type system and type inference,
> but YMMV.

Actually I was referring to this kind of thing:

  http://citeseer.ist.psu.edu/163183.html

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8tns6$bo4$1@news.xmission.com>
Jon Harrop wrote:
> Markus E.L. 2 wrote:
>> He is, I think, talking about automatic (?) currying, that is
>>
>>   # let f x y = x * y ;;
>>   val f : int -> int -> int = <fun>
>>   # let g = f 10 ;;
>>   val g : int -> int = <fun>
>>
>> I imagine this being difficult to implement (in the
>> compiler/interpreter) without a static type system and type inference,
>> but YMMV.
> 
> Actually I was referring to this kind of thing:
> 
>   http://citeseer.ist.psu.edu/163183.html

Which is *exactly* the kind of thing Scheme handles just fine. I know 
because I've done it.

Does a type system make doing it easier? You bet. Does the lack of a 
type system make it "prohibitively difficult"? Not even close.

-thant
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <6xir7xldyu.fsf@hod.lan.m-e-leypold.de>
Thant Tessman wrote:

> Jon Harrop wrote:
>> Markus E.L. 2 wrote:
>>> He is, I think, talking about automatic (?) currying, that is
>>>
>>>   # let f x y = x * y ;;
>>>   val f : int -> int -> int = <fun>
>>>   # let g = f 10 ;;
>>>   val g : int -> int = <fun>
>>>
>>> I imagine this being difficult to implement (in the
>>> compiler/interpreter) without a static type system and type inference,
>>> but YMMV.
>> Actually I was referring to this kind of thing:
>>   http://citeseer.ist.psu.edu/163183.html
>
> Which is *exactly* the kind of thing Scheme handles just fine. I know
> because I've done it.
>
> Does a type system make doing it easier? You bet. Does the lack of a
> type system make it "prohibitively difficult"? Not even close.

OK. We are getting somewhere. But Scheme doesn't do automatic
currying, or does it? Do you have any reference handy how currying is
done in Scheme (but not manually by writing a lambda: I can do that
myself :-).

Regards -- Markus
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-6BF427.23443002082007@news-europe.giganews.com>
In article <··············@hod.lan.m-e-leypold.de>,
 ·····································@ANDTHATm-e-leypold.de (Markus 
 E.L. 2) wrote:

> 'joswig AT corporate-world DOT lisp DOT de' wrote:
> 
> > On Aug 2, 12:31 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> >> Joachim Durchholz wrote:
> >> > > It is trivial to add currying toLisp
> >>
> >> > Haskell library code says otherwise.
> >>
> >> > Currying isn't very powerful for order-0 functions.
> >>
> >> Higher-order functions are prohibitively difficult to write without a static
> >> type system.
> >>
> >> This is why currying, higher-order functions, closures, continuations and
> >> all other non-trivial functional techniques are ubiquitous in Haskell, F#
> >> and OCaml but almost unheard of in Lisp/Scheme.
> >
> > This is more total bullshit from 'Dr' Harrop.
> 
> He is, I think, talking about automatic (?) currying, that is
> 
>   # let f x y = x * y ;;
>   val f : int -> int -> int = <fun>
>   # let g = f 10 ;;
>   val g : int -> int = <fun>
> 
> I imagine this being difficult to implement (in the
> compiler/interpreter) without a static type system and type inference,
> but YMMV.
> 
> Ooops. Didn't I have the resolution not to reply to one of RJ's posts
> any more? On the other side the phrasing "more total bullshit"
> strikingly demonstrates some of the points I tried to make in my last
> replies to him. I know, drivel. 
> 
> Regards -- Markus


* Currying is usually done via a library function.
  Some Lisp dialects have a curry operator.
  http://oop.rosweb.ru/dylan/book.annotated/ch17.html#curry0

  'Automatic Currying' is usually not used. It does
  not play well with the syntax, obfuscates code and is
  not compatible, for example, with the parameter lists
  of Common Lisp.

http://cl-cookbook.sourceforge.net/functions.html#curry
http://obfuscatedcode.blogspot.com/2005/05/currying-in-common-lisp.html
http://www.engr.uconn.edu/~jeffm/Papers/curry.html

'Automatic Currying' (Implicit Currying) is not popular in Lisp
because of the lack of static typing. It just does not mix
well with the syntax and the parameter list facilities.
Common Lisp for example has (optional) keyword arguments (with defaults)
and optional arguments (with defaults).


* higher-oder functions

  Lisp uses higher-order functions
  (functions that take other functions as parameters
  or return functions).
  Higher-order functions are typical in application code
  and libraries.

* Closures. Please. Lisp has closures. Lisp users use closures.
  We love closures.

* Continuations. Please.
  Scheme was among the first languages to have continuations
  figured out. Common Lisp has limited continuations,
  some libaries implement them. Continuations
  are being used/implemented in application code, for example
  in continuation-based web libraries.

http://www.schemers.org/Documents/Standards/R5RS/HTML/r5rs-Z-H-9.html#%_idx_566

-- 
http://lispm.dyndns.org
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <9dtzrhmu7z.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <··············@hod.lan.m-e-leypold.de>,
>  ·····································@ANDTHATm-e-leypold.de (Markus 
>  E.L. 2) wrote:
>
>> 'joswig AT corporate-world DOT lisp DOT de' wrote:
>> 
>> > On Aug 2, 12:31 pm, Jon Harrop <····@ffconsultancy.com> wrote:
>> >> Joachim Durchholz wrote:
>> >> > > It is trivial to add currying toLisp
>> >>
>> >> > Haskell library code says otherwise.
>> >>
>> >> > Currying isn't very powerful for order-0 functions.
>> >>
>> >> Higher-order functions are prohibitively difficult to write without a static
>> >> type system.
>> >>
>> >> This is why currying, higher-order functions, closures, continuations and
>> >> all other non-trivial functional techniques are ubiquitous in Haskell, F#
>> >> and OCaml but almost unheard of in Lisp/Scheme.
>> >
>> > This is more total bullshit from 'Dr' Harrop.
>> 
>> He is, I think, talking about automatic (?) currying, that is
>> 
>>   # let f x y = x * y ;;
>>   val f : int -> int -> int = <fun>
>>   # let g = f 10 ;;
>>   val g : int -> int = <fun>
>> 
>> I imagine this being difficult to implement (in the
>> compiler/interpreter) without a static type system and type inference,
>> but YMMV.
>> 
>> Ooops. Didn't I have the resolution not to reply to one of RJ's posts
>> any more? On the other side the phrasing "more total bullshit"
>> strikingly demonstrates some of the points I tried to make in my last
>> replies to him. I know, drivel. 
>> 
>> Regards -- Markus
 

> 'Automatic Currying' (Implicit Currying) is not popular in Lisp
> because of a the lack of static typing. It just mixes

From the context of the thread I concluded that "automatic currying"
was exactly what Jon was talking about -- omitting the 'automatic': A
more forgiving and less confrontational reading of Jons post should
have helped anyone enabled to spot this.

Aber ich dächte wir wären eh' übereingekommen, dass das alles nichts
bringt, oder?

Grüssle -- Markus
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8sitp$uht$1@news.xmission.com>
Jon Harrop wrote:
> Joachim Durchholz wrote:
>>> It is trivial to add currying to Lisp
>> Haskell library code says otherwise.
>>
>> Currying isn't very powerful for order-0 functions.
> 
> Higher-order functions are prohibitively difficult to write without a static
> type system.
> 
> This is why currying, higher-order functions, closures, continuations and
> all other non-trivial functional techniques are ubiquitous in Haskell, F#
> and OCaml but almost unheard of in Lisp/Scheme.

This is more complete nonsense. Yes, a type system makes life a lot 
easier, but a lack of one does not make using higher-order functions 
prohibitively difficult. I don't have any experience with Lisp, but in 
Scheme, higher-order functions are quite natural. Scheme was the 
language that taught me higher-order functions. Ditto continuations.

I'm a fan of static type systems, and my preferred language is Standard 
ML, and maybe you know how to program in OCaml, but you really don't 
know what you're talking about when it comes to Scheme, and it's getting 
really old.

Oh, and one more thing that's been bothering me. A long time ago you 
challenged my claim that vertex arrays were "immediate mode." You 
referenced a paper from Apple and a paper from HP that happened to make 
a distinction between immediate mode and vertex arrays. Here's how the 
OpenGL Spec itself defines immediate mode: "For the most part, OpenGL 
provides an immediate-mode interface, meaning that specifying an object 
causes it to be drawn." (Section 1.3 of The OpenGL Graphics System, A 
Specification (Version 2.0 - October 22, 2004)). So you (and the papers 
you referenced) were simply wrong about that too.

-thant
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b225e3$0$1624$ed2619ec@ptn-nntp-reader02.plus.net>
Thant Tessman wrote:
> This is more complete nonsense. Yes, a type system makes life a lot
> easier, but a lack of one does not make using higher-order functions
> prohibitively difficult. I don't have any experience with Lisp, but in
> Scheme, higher-order functions are quite natural. Scheme was the
> language that taught me higher-order functions. Ditto continuations.

Try writing some non-trivial higher-order code in Lisp or Scheme and you'll
see what I mean.

> So you (and the papers you referenced) were simply wrong about that too.

I'll be sure to tell the OpenGL vendors to refer to "Thant's immediate mode"
in future...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8th7g$uvl$1@news.xmission.com>
Jon Harrop wrote:
> Thant Tessman wrote:
>> This is more complete nonsense. Yes, a type system makes life a lot
>> easier, but a lack of one does not make using higher-order functions
>> prohibitively difficult. I don't have any experience with Lisp, but in
>> Scheme, higher-order functions are quite natural. Scheme was the
>> language that taught me higher-order functions. Ditto continuations.
> 
> Try writing some non-trivial higher-order code in Lisp or Scheme and you'll
> see what I mean. [...]

I have. A lot of it in fact. So have a few of my friends and coworkers. 
So have a few people who write whole books on the topic.

At the end of this post I've appended a little exercise I wrote 
specifically to illustrate to non-FP-programmers how powerful functional 
programming can be. This version is written in SML. The first version of 
this was written a few years earlier in Scheme, was roughly the same 
size, and took somewhere around three hours to get working once I came 
up with the idea. I've also built a cooperative multitasking threader on 
top of call/cc in Scheme as well. I understand I'm posting in a forum 
frequented by some pretty talented programmers, but still, these aren't 
exactly trivial hacks if I may be so bold.

In short, Scheme is *really* good at functional programming. My 
understanding is that Common Lisp is as well. All evidence is that your 
statements to the contrary are made from a position of complete ignorance.

-thant

***



(*

    This is a lexer generator comprised in its core of only four
    small functions. The programmer assembles these functions into
    regular expression pattern-matching functions.

    The idea is that a pattern matcher function takes a list of
    streams, and returns a new list of streams advanced by every
    combination allowed by the pattern matcher function. Note that
    the number of streams returned by the function typically won't
    match the number of streams passed in. If the pattern doesn't
    match at all, the empty list is returned. In this implementation,
    a stream is simply a tuple containing a list of characters
    consumed by the pattern matcher, and a list of characters not
    yet consumed.

    I've added a lot of explanatory comments, but it's worth noting
    that without them, this thing fits on a single page.

*)


(* The first function 'tok' builds a pattern matcher function
    that matches a single specified token (character). *)

(* Build a matcher for a single token (character) t: each input
   stream whose next unconsumed character equals t is advanced by
   one character; all other streams are dropped. *)
fun tok t =
     let
         (* Advance one stream past t, or reject it. *)
         fun step (consumed, remaining) =
             case remaining of
                 c :: rest => if c = t then SOME (c :: consumed, rest) else NONE
               | [] => NONE
     in
         List.mapPartial step
     end


(* This matches a sequence of patterns. *)

(* Match a sequence of patterns: feed the streams through each
   rule in turn, left to right. *)
fun seq rules streams =
     let
         fun thread ([], ss) = ss
           | thread (rule :: rest, ss) = thread (rest, rule ss)
     in
         thread (rules, streams)
     end


(* This matches any of a list of patterns. It's analogous to
     a series of patterns separated by the '|' in traditional
     regular expressions. *)

(* Alternation: try every rule on the same input streams and keep
   all resulting streams (analogous to '|' in regular expressions). *)
fun bar rules streams =
     foldr (fn (rule, acc) => rule streams @ acc) [] rules


(* Kleene closure. Analogous to '*' *)

(* Kleene closure (analogous to '*'): the original streams plus
   every advancement obtained by applying the rule one or more
   times, until the rule stops matching. *)
fun star rule streams =
     let
         fun iterate ss =
             let
                 val advanced = rule ss
             in
                 case advanced of
                     [] => []
                   | _ => advanced :: iterate advanced
             end
     in
         List.concat (streams :: iterate streams)
     end


(* The rest of these are built from the previous four and
     are provided for convenience. *)

(* Positive closure. Analogous to '+' *)

fun pos rule = seq [rule, star rule]


(* Optional pattern. Analogous to '?' *)

fun opt rule = bar [rule, fn x => x]


(* Range of characters. Analogous to the dash within
     a character class '[]' *)

(* Character range, analogous to the dash inside a character class:
   matches any single character between 'a' and 'b' inclusive.  The
   endpoints are swapped first if given in descending order. *)

fun range (a, b) =
     if b < a then range (b, a)
     else
         let val hi = Char.ord b
             fun matchers i =
                 if i > hi then []
                 else tok (Char.chr i) :: matchers (i + 1)
         in bar (matchers (Char.ord a))
         end


(* Matches a literal string specified by 's' *)

fun lit s = seq (map tok (String.explode s))


(* Matches any of a set of characters. *)

fun set s = bar (map tok (String.explode s))


(* The next two functions are for demonstrating the use of
     the above functions. This first function takes the resulting
     streams produced by the application of a pattern on a stream
     (or streams) and selects the longest match if one exists. *)

(* From the streams produced by applying a pattern, select the one
   with the longest consumed prefix (earlier streams win ties), with
   the consumed characters put back into reading order.  Returns NONE
   when nothing was consumed at all. *)

fun longest streams =
     let fun pick ((eaten, rest), best as (max, _)) =
             let val n = List.length eaten
             in if n < max then best
                else (n, (List.rev eaten, rest))
             end
         val (count, stream) = List.foldr pick (0, ([], [])) streams
     in
         if count = 0 then NONE else SOME stream
     end


(* This takes a rule and a string, turns the string into
     a list of streams (containing one stream), applies the
     rule, and returns the longest match. *)

fun lex rule s = longest (rule [([],String.explode s)])


(* A demonstration of the above. Here's a pattern to match
     floating point numbers. *)

(* "-"?(([0-9]+(\\.[0-9]+)?)|(\\.[0-9]+))([eE][+-]?[0-9]+)? *)

(* Building blocks for the floating-point pattern above.  Note that
   'real' here shadows the Basis conversion function of the same
   name, as in the original. *)
val digit    = range (#"0", #"9")
val digits   = pos digit
val mantissa = seq [tok #".", digits]
val exp      = seq [set "eE", opt (set "+-"), digits]
val real     = seq [opt (tok #"-"),
                    seq [bar [seq [digits, opt mantissa], mantissa],
                         opt exp]]

(*

With the above defined, you can do this:

- lex real "3.45e-6";
val it = SOME ([#"3",#".",#"4",#"5",#"e",#"-",#"6"],[])
   : (char list * char list) option
- lex real "hi there";
val it = NONE : (char list * char list) option

*)
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b263c4$0$1605$ed2619ec@ptn-nntp-reader02.plus.net>
Thant Tessman wrote:
> ...The first version of this was written a few years earlier in Scheme,
> was roughly the same size, and took somewhere around three hours to get
> working once I came up with the idea.

This is an excellent example, thank you. If you look at the inferred types:

val tok = fn : ''a -> (''a list * ''a list) list -> (''a list * ''a list)
list

val seq = fn : ('a -> 'a) list -> 'a -> 'a

val bar = fn : ('a -> 'b list) list -> 'a -> 'b list

val star = fn : ('a list -> 'a list) -> 'a list -> 'a list

val pos = fn : ('a list -> 'a list) -> 'a list -> 'a list

val opt = fn : ('a list -> 'a list) -> 'a list -> 'a list

val range = fn
  : char * char
    -> (char list * char list) list -> (char list * char list) list

val lit = fn
  : string -> (char list * char list) list -> (char list * char list) list

val set = fn
  : string -> (char list * char list) list -> (char list * char list) list

val longest = fn : ('a list * 'b list) list -> ('a list * 'b list) option

val digit = fn : (char list * char list) list -> (char list * char list)
list
val digits = fn : (char list * char list) list -> (char list * char list)
list
val mantissa = fn
  : (char list * char list) list -> (char list * char list) list
val exp = fn : (char list * char list) list -> (char list * char list) list
val real = fn : (char list * char list) list -> (char list * char list) list

As you can see, this idiomatic Scheme code uses nothing higher than 2nd
order functions (seq, bar, star, pos, opt) and none of these 2nd order
functions even accept curried function arguments. This is what you might
call "low-order functional programming".

Compare this to Chris Okasaki's paper about even higher-order functions
where he uses 3rd, 4th, 5th and even 6th order functions in the same
context:

  http://citeseer.ist.psu.edu/163183.html

You don't have to look very far to find these "even higher-order" functions
in MLs. A memoize function that can memoize recursive calls is a trivial
example of a 3rd order function:

# let memoize f =
    let hash = Hashtbl.create 1 in
    let rec f' a =
      try Hashtbl.find hash a with Not_found ->
        let b = f f' a in
        Hashtbl.add hash a b;
        b in
    f';;
val memoize : (('a -> 'b) -> 'a -> 'b) -> 'a -> 'b = <fun>

A quick scan of our code base shows many functions up to 4th order. Static
typing makes this possible.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186099294.631888.76770@j4g2000prf.googlegroups.com>
On Aug 2, 3:58 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> You don't have to look very far to find these "even higher-order" functions
> in MLs. A memoize function that can memoize recursive calls is a trivial
> example of a 3rd order function:
>
> A quick scan of our code base shows many functions up to 4th order. Static
> typing makes this possible.

Some of the rest of us can write memoize for recursive functions
without static typing.  If Harrop can't....
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8tq5j$bgk$1@news.xmission.com>
Jon Harrop wrote:
> Thant Tessman wrote:

[...]

> As you can see, this idiomatic Scheme code uses nothing higher than 2nd
> order functions (seq, bar, star, pos, opt) and none of these 2nd order
> functions even accept curried function arguments. This is what you might
> call "low-order functional programming".
> 
> Compare this to Chris Okasaki's paper about even higher-order functions
> where he uses 3rd, 4th, 5th and even 6th order functions in the same
> context:
> 
>   http://citeseer.ist.psu.edu/163183.html

[...]

Then you don't understand the example. The 'order' of the functions in 
my example is completely a function of the depth of the pattern function 
you construct, which is totally arbitrary. Although I didn't know it at 
the time, what I built *was* a parser combinator library. And I built it 
in Scheme. (I have a printout of it in a box somewhere, but I'm not 
motivated enough to dig it out and type it in.)

Your association of order with number of explicitly curried items seems 
to be confusing you. When you see the 'a type parameter in e.g. the pos 
function's type signature, you have to understand that it will typically 
be parameterized as yet another first-order function, which in turn is 
type-parameterized as something that can be yet another first-order 
function, which...

As I said elsewhere, automatic currying is a red herring.

-thant
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8tr2u$m4j$1@news.xmission.com>
I (Thant Tessman) wrote:

> [...] When you see the 'a type parameter in e.g. the pos 
> function's type signature, you have to understand that it will typically 
> be parameterized as yet another first-order function, [...]

I think that should be "second-order function".

-thant
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5u1weljxq7.fsf@hod.lan.m-e-leypold.de>
Thant Tessman wrote:
>
> As I said elsewhere, automatic currying is a red herring.

Which you failed to make plausible. Red herring to what? How can a
language property be a red herring?

Regards -- Markus
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8u5id$m56$1@news.xmission.com>
Markus E.L. 2 wrote:
> Thant Tessman wrote:
>> As I said elsewhere, automatic currying is a red herring.
> 
> Which you failed to make plausible. Red herring to what? How can a
> language property be a red herring?

Automatic currying is syntactic sugar. It's been a while but I'm pretty 
sure it could be added to Scheme with a macro. Basically:

	fun f a b ... = <body>;

which in Scheme might look something like

	(fun f a b ... <body>)

becomes

	(define f (lambda (a) (lambda (b) ... <body>)))

The point is that this is not the point. Lambda is the point.

SML's (and OCaml's) pattern matching buys you some conciseness and 
rigor. Its type system buys you some logic sanity checks and performance 
optimizations. These are good things. In some situations they're really 
good things. The difference between my Scheme implementation of parser 
combinators and my SML parser combinators is that I had to do more 
testing to convince myself I got the Scheme version right. And the SML 
version is very likely faster. But that's it. And for the most part, for 
me, that's enough.

-thant
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <jvps2261ny.fsf@hod.lan.m-e-leypold.de>
Thant Tessman wrote:

> Markus E.L. 2 wrote:
>> Thant Tessman wrote:
>>> As I said elsewhere, automatic currying is a red herring.
>> Which you failed to make plausible. Red herring to what? How can a
>> language property be a red herring?
>
> Automatic currying is syntactic sugar. It's been a while but I'm
> pretty sure it could be added to Scheme with a macro. Basically:
>
> 	fun f a b ... = <body>;
>
> which in Scheme might look something like
>
> 	(fun f a b ... <body>)
>
> becomes
>
> 	(define f (lambda (a) (lambda (b) ... <body>)))
>
> The point is that this is not the point. Lambda is the point.

No. the point is the "automatic". The original claim was that
[automatic] currying makes programs shorter and (arguably) more
concise, not that there is no currying possible in Scheme or
Lisp. With explicit lambdas of course the program doesn't become
shorter.

Regards -- Markus
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f974pf$tm4$1@news.xmission.com>
Markus E.L. 2 wrote:
> Thant Tessman wrote:

[...]

>> The point is that this is not the point. Lambda is the point.
> 
> No. the point is the "automatic". The original claim was that
> [automatic] currying makes programs shorter and (arguably) more
> concise, not that there is no currying possible in Scheme or
> Lisp. With explicit lambdas of course the program doesn't become
> shorter.

The currying you could add to Scheme with a macro is just as "automatic" 
as the currying in OCaml. And they would compile into the same basic 
code (modulo the type system). Yet as far as I know, no one bothers to 
add automatic currying to Scheme. One reason might be that it's even 
more trivial to write a function that does partial application. Paul 
Rubin already mentioned how elsewhere in this thread:

   (define (curry f a) (lambda args (apply f (cons a args))))

Automatic currying really is syntactic sugar. It really isn't the 
important part.

Having said all that, please note that I wasn't responding to that claim 
anyway. I was responding to the claim that it was "prohibitively 
difficult" to write higher-order functions without a static type system. 
A static type system most definitely allows you to arrive at bug-free 
code more quickly, but as I said, the lack of a static type system does 
not make programming higher-order functions "prohibitively difficult." 
Only someone who doesn't have any significant experience in Scheme or 
Common Lisp would make such a claim.

-thant
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7tejiglohj.fsf@hod.lan.m-e-leypold.de>
Thant Tessman wrote:

> Markus E.L. 2 wrote:
>> Thant Tessman wrote:
>
> [...]
>
>>> The point is that this is not the point. Lambda is the point.
>> No. the point is the "automatic". The original claim was that
>> [automatic] currying makes programs shorter and (arguably) more
>> concise, not that there is no currying possible in Scheme or
>> Lisp. With explicit lambdas of course the program doesn't become
>> shorter.
>
> The currying you could add to Scheme with a macro is just as
> "automatic" as the currying in OCaml. And they would compile into the
> same basic code (modulo the type system). Yet as far as I know, no one
> bothers to add automatic currying to Scheme. One reason might be that
> it's even more trivial to write a function that does partial
> application. Paul Rubin already mentioned how elsewhere in this thread:
>
>    (define (curry f a) (lambda args (apply f (cons a args))))
>
> Automatic currying really is syntactic sugar. It really isn't the
> important part.

It is in this sub-discussion since the claim was that (a) automatic
currying make code shorter (so you'd really like to avoid the extra
'curry' token) and (b) automatic currying needs the type information
to know when the application is complete.

I'm admittedly not completely sure about (b), but at the moment I
don't see any other way (and I'm not going do much thinking into it at
the moment either).

> Having said all that, please not that I wasn't responding to that
> claim anyway. I was responding to the claim that it was "prohibitively
> difficult" to write higher-order functions without a static type
> system. 

It's (b) which Jon argued makes it much easier to maintain the various
variants of higher order functions, because currying does not have to
be explicit. Nobody said it would be impossible, but I note that it is
languages like OCaml that have automatic currying that made
systematic, one might say gratuitous, currying popular as an
implementation technique.


> A static type system most definitely allows you to arrive at
> bug-free code more quickly, but as I said, the lack of a static type
> system does not make programming higher-order functions "prohibitively
> difficult." 

Not higher order functions. That AFAIR was not the claim. Using
currying systematically and everywhere, that was the claim.

>Only someone who doesn't have any significant experience
> in Scheme or Common Lisp would make such a claim.

Well -- do Lisp people use currying everywhere? Or don't they and they
only _can_ use it every time they want. If the answer to the first
question is 'no', why is that so?

Regards -- Markus
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9auq8$umc$1@registered.motzarella.org>
Markus E.L. 2 schrieb:
> Thant Tessman wrote:

>>    (define (curry f a) (lambda args (apply f (cons a args))))
>>
>> Automatic currying really is syntactic sugar. It really isn't the
>> important part.
> 
> It is in this sub-discussion since the claim was that (a) automatic
> currying make code shorter (so you'd really like to avoid the extra
> 'curry' token) and (b) automatic currying needs the type information
> to know when the application is complete.
> 
> I'm admittedly not completely sure about (b), but at the moment I
> don't see any other way (and I'm not going do much thinking into it at
> the moment either).

I can then tell you that no type information is needed.
Type information and currying have not much to do with each other.
I don't like the term "automatic currying" but prefer "implicit
currying" over it.


>> Only someone who doesn't have any significant experience
>> in Scheme or Common Lisp would make such a claim.
> 
> Well -- do Lisp people use currying everywhere? Or don't they and they
> only _can_ use it every time they want. If the answer to the first
> question is 'no', why is that so?

I agree with you in this point. From looking at freely available Lisp
code I found that currying is more or less never used, but instead
this explicit lambda.
I don't like that, so I got implicit currying in my code. Ok.. it is
not really implicit - you have to embed the expression in [].
But this is very preferrable for Lisp. It gives the eye an element
on which it can hang on. Every time currying is used the programmer
must be aware of it anyway. So, instead of making it fully implicit,
like in (* 3) I would say [* 3]. In Lisp this has some importance also
because otherwise there were ambiguities.
All functions that take any number of args would have this potential
problem. Is (* x) meant as exactly that (returning x in that case),
or did the author want to say (lambda (args) (apply #'* x args))?
Also it allows me to curry away the nth argument or multiple arguments:
[rgb _ _ 255]

When you analyze lots of Lisp code you will only find few places where
currying makes sense. This is because most programs are only partially
written in functional style. One reason for this might be because there
was no implicit currying in Lisp :-)


André
-- 
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f9c2ka$e4m$1@online.de>
Markus E.L. 2 schrieb:
> (b) automatic currying needs the type information
> to know when the application is complete.

For a strict evaluator, the number of parameters that a function 
consumes must be constant. I.e. you need a minimal type system that 
knows just functions and arities, and you need to enforce that every 
alternate execution path through a function consumes the same number of 
parameters.

A nonstrict evaluator wouldn't care. Partly evaluating subexpressions 
and storing them away for later is normal business anyway.

Regards,
Jo
From: Holger Siegel
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <46bb103d$1@news.arcor-ip.de>
Joachim Durchholz <··@durchholz.org> schrieb:
> Markus E.L. 2 schrieb:
>> (b) automatic currying needs the type information
>> to know when the application is complete.
>
> For a strict evaluator, the number of parameters that a function 
> consumes must be constant.

 In the presence of automatic currying, the number of parameters is
 constant: 1. The SECD machine is able to evaluate functions with an
 arbitrary number of parameters in strict evaluation order.

> I.e. you need a minimal type system that 
> knows just functions and arities, and you need to enforce that every 
> alternate execution path through a function consumes the same number of 
> parameters.

 When there's no type system that distinguishes between procedures and
 values, then there's no need to check if a function call is saturated.
 You can still protect calls to primitive fuctions by surrounding them
 with lambda expressions: let add = (\x y -> #add x y).
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural     language Minim
Date: 
Message-ID: <5hdhkdF3jv0dbU1@mid.individual.net>
Joachim Durchholz wrote:
> Sacha schrieb:
>> I often have a functional interface with HOFs, then add macro wrappers 
>> so that it is easier to the eye in the client source code. (when it 
>> makes sense)
> 
> Not sure whether that's an argument for or against the Lisp way of doing 
> things, though... I've seen some domain-specific languages done in 
> Haskell, and they didn't seem "heavy on the eye" to me, even without 
> macros.
> I can't tell what exactly the reasons for that might be. Possibly 
> because Haskell code uses less syntax than typical modern Lisps (both in 
> the sense of "less keywords" and in the sense of "less parentheses", 
> i.e. at the token and at the lexical level).

Lisp macros were introduced in the 60's as a (compilable) subset of 
fexprs, that is, functions that receive unevaluated arguments, which was 
a feature of Lisp 1.5 and many other subsequent Lisps. This allows you 
to control evaluation of such arguments, among other things. Lazy 
evaluation is a different way to get similar control of evaluation, and 
monads can be seen as a different packaging of fexprs. [1] My guess 
would be that it is because of these broad similarities that both 
Haskell and Lisp are good starting points for DSLs.


Pascal

[1] This is, of course, a gross oversimplification.

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b1b630$0$1626$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> A set of well-designed set of functions taking and constructing closures
> is a domain-specific language, too.

That covers a smaller set of DSLs, of course.

> I have seen this technique applied in Haskell, and I see no reason why
> it wouldn't work in any language with closures and HOFs (including Lisp).

That works, but the rest doesn't work in Haskell. If you have a foreign
syntax for your DSL with a different grammar and you must compile it into
your host language, you need macros.

>> I have personally extended my code with macros for lex and yacc like
>> lexers/parsers inside my code. To my knowledgem this could not be done as
>> easily in OCaml.
> 
> Use HOFs.

OCaml has macros for embedded lexers and parsers so I would use those.

> They will usually run during execution time, so you might have
> performance differences, but otherwise, the general flexibility should
> be the same.

OCaml's macros can be run at compile time.

>> Now this has not so much to do with dynamic typing of Lisp as it has with
>> the s-expression syntax.
> 
> I don't think that syntax plays a role for domain-specific sublanguages.

Yes, this is a red herring. OCaml gets on just fine without s-exprs...

> Sure, it's crucial for macros (you need a very regular syntax to make
> writing macros easy enough), but you don't need macros for DSLs.

I disagree on both counts. OCaml has shown that you don't need a homogeneous
syntax (s-exprs) to write macros and DSLs can be easier to implement with
macros.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8to75$j83$3@online.de>
Jon Harrop schrieb:
> Joachim Durchholz wrote:
>> A set of well-designed set of functions taking and constructing closures
>> is a domain-specific language, too.
>> I have seen this technique applied in Haskell, and I see no reason why
>> it wouldn't work in any language with closures and HOFs (including Lisp).
> 
> That works, but the rest doesn't work in Haskell. If you have a foreign
> syntax for you DSL with a different grammar and you must compile it into
> your host language, you need macros.

Assuming that Lisp macros still use some variant of S-expression syntax, 
I don't see how this would be different for (say) a Haskell variant 
(except that Haskell would allow you to leave out the topmost level of 
parentheses, which is a very minor point in this case).

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b26960$0$1611$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> Jon Harrop schrieb:
>> That works, but the rest doesn't work in Haskell. If you have a foreign
>> syntax for you DSL with a different grammar and you must compile it into
>> your host language, you need macros.
> 
> Assuming that Lisp macros still use some variant of S-expression syntax,

Sorry, I was referring to OCaml's macros.

> I don't see how this would be different for (say) a Haskell variant
> (except that Haskell would allow you to leave out the topmost level of
> parentheses, which is a very minor point in this case).

Yes.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b1b52b$0$1635$ed2619ec@ptn-nntp-reader02.plus.net>
Gisle Sælensminde wrote:
> Jon Harrop <···@ffconsultancy.com> writes:
>> How is Lisp any more programmable than, say, OCaml?
> 
> One example is lisp macros that allow you to make domain-specific
> extansions. I have personally extended my code with macros for lex and
> yacc like lexers/parsers inside my code. To my knowledgem this could not
> be done as easily in OCaml.

If you're interested in how easily this can be done in OCaml, have a look at
some OCaml macros or the Minim interpreters written in OCaml that I posted
earlier in this thread. Such things are easier in OCaml...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8o6fd$d9r$1@registered.motzarella.org>
Jon Harrop schrieb:
> Andr� Thieme wrote:
>> I wanted to express that OCaml is not a multiparadigm language, while
>> Lisp is one. OCaml (and even more so Haskell) is specialized on
>> functional programming. Take for example the one namespace.
>> Lisp has as many you want. But for functional programming it means that
>> you need  function or #'  and also  funcall. That adds (admittedly tiny)
>> extra complexity in the Lisp code... those will add up when doing nearly
>> completely functional programming.
>> Also Lisp has no implicit currying. So you get another bit of complexity
>> as you have to say (mapcar (curry #'* 5) list). In Haskell it would be
>> something like     map (* 5) l.
>> Whenever you decide for a feature in a programming language you decide
>> against others.
>> By orienting the syntax strongly on functional programming it becomes
>> more cumbersome to express other things.
> 
> You just gave examples of Lisp being cumbersome and concluded that OCaml and
> Haskell are cumbersome. Can you give some examples where OCaml or Haskell
> syntax is cumbersome?

(Int64.to_float (Int64.sub (Int64.mul q (Int64.of_int n)) (Int64.mul s 
s))) /. (float n)
Handy syntax for self made data types / containers.
Make List Comprehension in OCaml.

Look at http://www.ffconsultancy.com/ocaml/ray_tracer/index.html
Make it so that you can say [0 0 0] instead of (vec3 0. 0. 0.)
Change every occurence of *. and +. to * and +
You have many let's in there. Don't make:
let delta = sqrt epsilon_float and pi = 4. *. atan 1.
let sqr x = x *. x
let sin t = sin(pi *. t /. 180.) and cos t = cos(pi *. t /. 180.)
let clamp l u x = if x < l then l else if x > u then u else x

but instead
multilet delta = sqrt epsilon_float and pi = 4 * atan 1
          sqr x = x * x
          sin t = sin(pi * t / 180) and cos t = cos(pi * t / 180)
          clamp l u x = if x < l then l else if x > u then u else x

and so on.

You might be able to give much better examples.

Have you ever tried using Haskell? Is there something you like better
in the OCaml syntax that is not present in Haskell? Change Haskell so
that it uses OCaml syntax.
Extend Haskell for implicit currying for the n-th argument and add
implicit "multi currying".
You want to create a color-object-creating function that always
returns very green colors?
Make it so that
let greenMaker = rgb _ 255
does the job.
Make < and > take any number of arguments. Allow this:
let special< = < a _ b _ c          # "multicurrying"

so that you can call it
let special<10/20/30 = special< 10 20 30

and special<10/20/30 15 25    results in "true" while
special<10/20/30 15 19        results in "false".

I don't know, perhaps this is easily possible.
An expert in the respective languages probably knows weak points.
For Haskell I don't know, perhaps there is no easy way of making
array-comprehension with basically the same syntax as for list 
comprehension,
but instead making in-place changes in arrays *shrug*.


>> Lisp basically comes with nothing, which is the price it pays for being
>> programmable.
> 
> How is Lisp any more programmable than, say, OCaml?

Macros that have knowledge about the environment and don't come in the
form of a separate pre-processor. Compiler Macros. Reader Macros.
Exchangable read table.
Runtime creation of code (eval, compile).


André
-- 
From: Dan Bensen
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8o9p3$rd6$1@wildfire.prairienet.org>
Andr� Thieme wrote:
 >> Can you give some examples where
 >> OCaml or Haskell syntax is cumbersome?
 > (Int64.to_float (Int64.sub (Int64.mul q (Int64.of_int n))
 >                            (Int64.mul s s)))
 > /. (float n)

Actually, it's not quite that bad.  You can
define your own operators if you want to.

let (+~) x y = Int64.add x y
let (-~) x y = Int64.sub x y
let ( *~) x y = Int64.mul x y
let (/~) x y = Int64.div x y

let s = Int64.of_string "1234567890"
let n = 2
let q = Int64.of_string "987654321"
;;
Printf.printf "%f\n"
((Int64.to_float (q *~ (Int64.of_int n) -~ (s *~ s))) /. (float n))
;;

=> -762078936521871744.000000

-- 
Dan
www.prairienet.org/~dsb/
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8obl5$sih$1@registered.motzarella.org>
Dan Bensen schrieb:
> Andr� Thieme wrote:
>  >> Can you give some examples where
>  >> OCaml or Haskell syntax is cumbersome?
>  > (Int64.to_float (Int64.sub (Int64.mul q (Int64.of_int n))
>  >                            (Int64.mul s s)))
>  > /. (float n)
> 
> Actually, it's not quite that bad.  You can
> define your own operators if you want to.
> 
> let (+~) x y = Int64.add x y
> let (-~) x y = Int64.sub x y
> let ( *~) x y = Int64.mul x y
> let (/~) x y = Int64.div x y

Interesting.
Now I am curious:
how can I remove the patterns out of your code Dan?
I want something like:

map (op f -> let op x y = f x y) [+~, -~, *~, /~] [Int64.add, Int64.sub, 
Int64.mul, Int64.div]

Does that make sense?


André
-- 
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46afcb1b$0$1630$ed2619ec@ptn-nntp-reader02.plus.net>
Andr� Thieme wrote:
>> let (+~) x y = Int64.add x y
>> let (-~) x y = Int64.sub x y
>> let ( *~) x y = Int64.mul x y
>> let (/~) x y = Int64.div x y
> 
> Interesting.
> Now I am curious:
> how can I remove the patterns out of your code Dan?
> I want something like:
> 
> map (op f -> let op x y = f x y) [+~, -~, *~, /~] [Int64.add, Int64.sub,
> Int64.mul, Int64.div]
> 
> Does that make sense?

You could do that with a macro but, in this case, you can cancel the
arguments from the definitions and define them simultaneously as a 4-tuple,
so you just write:

  open Int64
  let ( + ), ( - ), ( * ), ( / ) = add, sub, mul, div

When you run this you get the four definitions that you want:

# open Int64;;
# let ( + ), ( - ), ( * ), ( / ) = add, sub, mul, div;;
val ( + ) : int64 -> int64 -> int64 = <fun>
val ( - ) : int64 -> int64 -> int64 = <fun>
val ( * ) : int64 -> int64 -> int64 = <fun>
val ( / ) : int64 -> int64 -> int64 = <fun>

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46afcb75$0$1630$ed2619ec@ptn-nntp-reader02.plus.net>
Andr� Thieme wrote:
> Make List Comprehension in OCaml.

This has already been done.

> Look at http://www.ffconsultancy.com/ocaml/ray_tracer/index.html
> Make it so that you can say [0 0 0] instead of (vec3 0. 0. 0.)

A macro like this:

  | "["; x=FLOAT; y=FLOAT; z=FLOAT; "]" -> <:expr< vec3 $x$ $y$ $z$ >>

> Change every occurence of *. and +. to * and +

I just realised that this is the same as the brackets-in-Lisp argument.
Rather than removing the "." from OCaml or the ()s from Lisp, I think it is
preferable to simply fade them out in the editor.

I think it is important to remember that +. is not just an addition
operator, it is also a type annotation as well. So a literal translation
from OCaml to F# is:

  x +. y
  (x : float) + (y : float)

> You have many let's in there. Don't make:
> let delta = sqrt epsilon_float and pi = 4. *. atan 1.
> let sqr x = x *. x
> let sin t = sin(pi *. t /. 180.) and cos t = cos(pi *. t /. 180.)
> let clamp l u x = if x < l then l else if x > u then u else x
> 
> but instead
> multilet delta = sqrt epsilon_float and pi = 4 * atan 1
>           sqr x = x * x
>           sin t = sin(pi * t / 180) and cos t = cos(pi * t / 180)
>           clamp l u x = if x < l then l else if x > u then u else x
> 
> and so on.

You could replace "=" with a different operator instead, using something
like:

  | p=patt; "=>"; e=expr -> <:str_item< let $p$ = $e$ >>

delta => sqrt epsilon_float and pi = 4. *. atan 1.
sqr x => x *. x
sin t => sin(pi *. t /. 180.) and cos t = cos(pi *. t /. 180.)
clamp l u x => if x < l then l else if x > u then u else x

I haven't seen a multilet but people have written other lexers and parsers
for OCaml, including whitespace sensitive ones. I am not very keen on this
idea though as it makes autoindenting impossible.

Also, Haskell has "where" and there is already an OCaml macro implementing
it.

> Have you ever tried using Haskell?

I've been tinkering with Haskell for a few months now.

> Is there something you like better in the OCaml syntax that is not present
> in Haskell? 

OCaml's autoindenting. Cut and pasted Haskell code never works...

> Change Haskell so that it uses OCaml syntax.

It would have to be the subset of OCaml that Haskell supports, of course.

> Extend Haskell for implicit currying for the n-th argument and add
> implicit "multi currying".

Haskell already has syntactic support for currying the same as OCaml.

> You want to create a color object creating function that is always
> returns very green colors?
> Make it so that
> let greenMaker = rgb _ 255
> does the job.

OCaml's labelled arguments take care of that quite nicely:

# let rgb ~r ~g ~b = r, g, b;;
val rgb : r:'a -> g:'b -> b:'c -> 'a * 'b * 'c = <fun>
# let greenMaker = rgb ~g:255;;
val greenMaker : r:'a -> b:'b -> 'a * int * 'b = <fun>

I remembered today that Mathematica has rather a nifty notation for lambda
functions, e.g. a squaring function applied to five:

  In[1] := #^2&[5]
  Out[1] = 25

Note that "#" or "#1" refers to the first argument and everything before
the "&" is the function.

But one particularly cool thing is that you can refer to a lambda function
recursively as #0. For example, five factorial:

  In[2] := If[# == 0, 1, # #0[#-1]]&[5]
  Out[2] = 120

> Make < and > take any number of arguments.

If you mean this:

  a < x <= b

then I think that is a great idea. I wish more languages did that. In fact,
this is another good task for OCaml's macros...

> Allow this: 
> let special< = < a _ b _ c          # "multicurrying"

Not sure I like that so much! :-)

> so that you can call it
> let special<10/20/30 = special< 10 20 30
> 
> and special<10/20/30 15 25    results in "true" while
> special<10/20/30 15 19        results in "false".

I think that is a bit obfuscated. Perhaps Mathematica's syntax is
preferable:

  10 < #1 < 20 < #2 < 30 &

> For Haskell I don't know, perhaps there is no easy way of making
> array-comprehension with basically the same syntax as for list
> comprehension, but instead making in-place changes in arrays *shrug*.

F# is very nice in this regard. You can use:

  a.[i] <- v

to update arrays, hash tables and so on. You can generate lists, arrays and
lazy sequences using comprehensions:

  {0 .. 10}
  [0 .. 10}
  [|0 .. 10|]

With all the usual maps and filters.

They just added slicing as well:

  let t = a[1::]

>> How is Lisp any more programmable than, say, OCaml?
> 
> Macros that have knowledge about the environment

Can you explain what exactly you mean by this?

> and don't come in the form of a separate pre-processor.

What exactly is it that you want to do?

> Compiler Macros.

You can use camlp4 to generate OCaml code. Is that the same thing?

> Reader Macros. 

Camlp4 can parse token streams, which I think is what you want.

> Exchangable read table.

Camlp4 provides extensible lexers, which I think is what you want (but I
haven't used them yet).

> Runtime creation of code (eval, compile). 

That would be nice. Some of the OCaml tools already do this internally but
(AFAIK) there is no API to let you compile and link in code on-the-fly.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ad2629$0$1589$ed2619ec@ptn-nntp-reader02.plus.net>
Stefan Ram wrote:
> Jon Harrop <···@ffconsultancy.com> writes:
>>As Haskell has shown, laziness cannot be implemented efficient at all.
> 
> http://neilmitchell.blogspot.com/2007/07/making-haskell-faster-than-c.html

Read the first comment:

  "Why not try writing a C implementation of the program requirements that's
actually fast?"

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <Wkmqi.21084$j7.379651@news.indigo.ie>
> Lisp could thrive with predefined operator precedence plus 
> indentation-is-parentheses 

Your opinion that such lispoids thrive is just not borne out by
history - Dylan, SRFI-49, etc.  
From: Tamas Papp
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87k5smf1dm.fsf@pu100877.student.princeton.edu>
Joachim Durchholz <··@durchholz.org> writes:

> Lisp could thrive with predefined operator precedence plus
> indentation-is-parentheses (plus parentheses for those cases where you
> still need them), have 90% less parentheses, 30% more code per page,
> and *still* be simple to process macrologically.

It has been done -- would you claim that Dylan is thriving?

>> making it an argument is done by the compiler.
>> (+ 1 2 3) instead of (+ (list 1 2 3))
>
> OK, currying cannot handle that case; the Haskell equivalent of the
> above would be
>   plus [1 2 3]
>
> However, such lists are needed only in a few places in a program. I
> don't think that this case warrants special attention to make it easy
> - 
> not if it constrains the design space elsewhere.

Whether you use them in a few places or a lot depends on your
programming style, which is shaped by the languages you have used in
the past.  I would be more careful with making claims like this one.

>>> Code transformation is done by the compiler in Haskell.
>>> Abstraction (i.e. new languages) is daily staple in Haskell. Sure,
>>> Haskell embedded languages cannot do fancy syntax, but if you're
>>> fine with frugal syntax, you define your embedded language simply
>>> as a set of HOFs to use.
>>
>> You can do the same in Lisp by making things lazy.
>
> Ah, but laziness cannot be efficiently implemented in Lisp.

Why?  What is "efficient" enough for you?  Lazyness for part of your
program is easy to implement, and is pretty efficient.  Of course it
will not be as efficient as Haskell, where lazyness is integrated into
the whole language.  However, lazyness is not the holy grail, it makes
a lot of sense if you can write your algorithm more simply and a lot
of the stuff doesn't get evaluated, but it can be a curse if you know
that 99% of operations you code need to be evaluated anyway.  Most of
the time, it doesn't make a whole lot of difference in performance.
And I have seen Haskellers fight the language to get non-lazy
evaluation.

Tamas
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <4Pnqi.18508$9l5.904245@phobos.telenet-ops.be>
Tamas Papp wrote:
> Joachim Durchholz <··@durchholz.org> writes:
> 
>> Lisp could thrive with predefined operator precedence plus
>> indentation-is-parentheses (plus parentheses for those cases where you
>> still need them), have 90% less parentheses, 30% more code per page,
>> and *still* be simple to process macrologically.
> 
> It has been done -- would you claim that Dylan is thriving?
> 
>>> making it an argument is done by the compiler.
>>> (+ 1 2 3) instead of (+ (list 1 2 3))
>> OK, currying cannot handle that case; the Haskell equivalent of the
>> above would be
>>   plus [1 2 3]
>>
>> However, such lists are needed only in a few places in a program. I
>> don't think that this case warrants special attention to make it easy
>> - 
>> not if it constrains the design space elsewhere.
> 
> Whether you use them in a few places or a lot depends on your
> programming style, which is shaped by the languages you have used in
> the past.  I would be more careful with making claims like this one.
> 
>>>> Code transformation is done by the compiler in Haskell.
>>>> Abstraction (i.e. new languages) is daily staple in Haskell. Sure,
>>>> Haskell embedded languages cannot do fancy syntax, but if you're
>>>> fine with frugal syntax, you define your embedded language simply
>>>> as a set of HOFs to use.
>>> You can do the same in Lisp by making things lazy.
>> Ah, but laziness cannot be efficiently implemented in Lisp.
> 
> Why?  What is "efficient" enough for you?  Lazyness for part of your
> program is easy to implement, and is pretty efficient.  Of course it
> will not be as efficient as Haskell, where lazyness is integrated into
> the whole language.  However, lazyness is not the holy grail, it makes
> a lot of sense if you can write your algorithm more simply and a lot
> of the stuff doesn't get evaluated, but it can be a curse if you know
> that 99% of operations you code need to be evaluated anyway.  Most of
> the time, it doesn't make a whole lot of difference in performance.
> And I have seen Haskellers fight the language to get non-lazy
> evaluation.
> 
> Tamas

Lazyness is also useful for separation of concerns.
read a file as if it was slurped entirely, process it as if it was fully 
available in memory, write the result as if they were all processed...
But do it in constant space. And no need for complex interleaving code.

I think that's pretty nice !

Sacha
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46aa1c53$0$1600$ed2619ec@ptn-nntp-reader02.plus.net>
Sacha wrote:
> Lazyness is also useful for separation of concerns.
> read a file as if it was slurped entirely, process it as if it was fully
> available in memory, write the result as if they were all processed...
> But do it in constant space. And no need for complex interleaving code.
> 
> I think that's pretty nice !

Absolutely, but you don't want laziness everywhere all the time because it
means you can't write fast code.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: George Neuner
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <0hkka313fjsago9bsiatn8n4pqln46n0en@4ax.com>
On Fri, 27 Jul 2007 17:15:46 +0100, Jon Harrop <···@ffconsultancy.com>
wrote:

>Sacha wrote:
>> Lazyness is also useful for separation of concerns.
>> read a file as if it was slurped entirely, process it as if it was fully
>> available in memory, write the result as if they were all processed...
>> But do it in constant space. And no need for complex interleaving code.
>> 
>> I think that's pretty nice !
>
>Absolutely, but you don't want laziness everywhere all the time because it
>means you can't write fast code.

It's true that complete laziness is not desirable, but the functional
call-by-need model (or at least some variant of it) seems to hold the
most promise for automagically exploiting parallelization on
multiprocessors.

George
--
for email reply remove "/" from address
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46aa7b59$0$1587$ed2619ec@ptn-nntp-reader02.plus.net>
George Neuner wrote:
> It's true that complete laziness is not desirable, but the functional
> call-by-need model (or at least some variant of it) seems to hold the
> most promise for automagically exploiting parallelization on
> multiprocessors.

The evidence I have seen does not agree with that:

1. GHC does have some experimental support for such things. Threads are
spawned aggressively so they must use lightweight threads. The lightweight
thread implementation is cooperatively multitasked and yields only at
allocations. Optimized Haskell code is likely to not allocate, so it never
yields and your other threads are never run. People have cited this
practical problem of the Haskell approach introducing dead locks in their
correct code on the Haskell Cafe mailing list (see "GHC threads and SMP").

2. We used .NET's threading to make our 250kLOC F# for Visualization product
entirely concurrent. The results are superb and the entire conversion took
only 1 man week and required changes in only four places of the entire
source code. This is a direct result of our writing mostly pure code in
OCaml/F#. The 20% that is performance critical uses mutation but still
abstracts beautifully and requires only a small number of locks as a
consequence.

We have also started to write concurrent OCaml code and, despite not having
a concurrent GC, the results are very impressive so far.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: George Neuner
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5thla3tu3meqnp4jj10iome45mpoia1rvh@4ax.com>
On Sat, 28 Jul 2007 00:01:12 +0100, Jon Harrop <···@ffconsultancy.com>
wrote:

>George Neuner wrote:
>> It's true that complete laziness is not desirable, but the functional
>> call-by-need model (or at least some variant of it) seems to hold the
>> most promise for automagically exploiting parallelization on
>> multiprocessors.
>
>The evidence I have seen does not agree with that:
>
>1. GHC does have some experimental support for such things. Threads are
>spawned aggressively so they must use lightweight threads. The lightweight
>thread implementation is cooperatively multitasked and yields only at
>allocations. Optimized Haskell code is likely to not allocate, so it never
>yields and your other threads are never run. People have cited this
>practical problem of the Haskell approach introducing dead locks in their
>correct code on the Haskell Cafe mailing list (see "GHC threads and SMP").

GHC may aggressively spawn threads, but their execution is still lazy.
Cooperative threading tends to interact poorly with lazy evaluation -
unless the programmer is careful, the result, more often than not, is
just lazy evaluation with additional thread overhead.  I'm not sure
how the automatic threading apportions work among the system threads,
but I do know that user threads must be started explicitly.  And the
GC is still single threaded if I'm not mistaken.  

I want parallelism to be exploited automagically on close coupled
multiprocessors and I want the speedup to be super-linear whenever
possible.  Haskell, as it stands now, can't do that because its
inherent parallelism at any point is limited by the shape of the call
tree and its run time is always proportional to the depth of the tree.

The model I've come to advocate for close coupled multiprocessing
(such as multiple core processors) is the inverse of Haskell - for
lack of a standard term I'm going to call it "predicated speculative
evaluation" - in which a function is evaluated speculatively as soon
as its preconditions are satisfied rather than waiting until its
results are needed.

One way to think of it would be as a CPS transformed program in which
all paths are speculatively evaluated in parallel with any false
conditional aborting further evaluation along its particular path.
This view is not entirely accurate because it doesn't take into
account multiple entry points made possible by shared data/conditions,
but it is the closest model that most software people understand.

The difficulty lies in choosing the set of preconditions to allow
reasonable speculative evaluation along relevant program paths while
limiting it along irrelevant ones.  Just performing the equivalent of
a breadth-first search among the ready paths is relatively easy, but
it wastes cycles and doesn't buy much in terms of enhanced
performance.  I've been noodling the precondition issue for a while
and I don't yet have a satisfactory answer.  It seems to be a hard
problem.


>2. We used .NET's threading to make our 250kLOC F# for Visualization product
>entirely concurrent. The results are superb and the entire conversion took
>only 1 man week and required changes in only four places of the entire
>source code. This is a direct result of our writing mostly pure code in
>OCaml/F#. The 20% that is performance critical uses mutation but still
>abstracts beautifully and requires only a small number of locks as a
>consequence.

That just shows that .NET's thread model is a pretty good match to
your program.  And since .NET threads are started manually, it says
little about the inherent parallelism of the program.


>We have also started to write concurrent OCaml code and, despite not having
>a concurrent GC, the results are very impressive so far.

George
--
for email reply remove "/" from address
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xmyxhuhip.fsf@ruckus.brouhaha.com>
George Neuner <·········@comcast.net> writes:
> I want parallelism to be exploited automagically on close coupled
> multiprocessors and I want the speedup to be super-linear whenever
> possible.  Haskell, as it stands now, can't do that because its
> inherent parallelism at any point is limited by the shape of the call
> tree and its run time is always proportional to the depth of the tree.

You know about this?

http://research.microsoft.com/~simonpj/papers/ndp/index.htm
From: George Neuner
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <j53sa31no3c1rnljk8unkep4o912l3qmtp@4ax.com>
On 28 Jul 2007 00:14:22 -0700, Paul Rubin
<·············@NOSPAM.invalid> wrote:

>George Neuner <·········@comcast.net> writes:
>> I want parallelism to be exploited automagically on close coupled
>> multiprocessors and I want the speedup to be super-linear whenever
>> possible.  Haskell, as it stands now, can't do that because its
>> inherent parallelism at any point is limited by the shape of the call
>> tree and its run time is always proportional to the depth of the tree.
>
>You know about this?
>
>http://research.microsoft.com/~simonpj/papers/ndp/index.htm

Yes, I've seen this.  I spent a while working with Connection Machines
in college and have written a lot of signal and image processing code
for DSPs and SIMD PCs - I am quite familiar with data parallel.  I've
also done quite a bit of work with shared memory multiprocessors.

However, classic data parallel excludes or does nothing to accelerate
many interesting graph and network algorithms.  Most shared memory
MIMD DP implementations (including DP Haskell) require vectorizing the
data and perform poorly on highly data dependent algorithms such as
following multiple paths through a graph and performing node dependent
computations.
[It's not that graphs can't be implemented as vectors, but in general
it's a hard problem for a compiler to make such a transformation
automatically.]

Compiler directed micro-threading is the answer to that, but automatic
threading is an equally hard problem for impure languages.  Haskell,
has an advantage in that any non-monad code potentially could be
micro-threaded, but the problem is that the interesting code in most
programs is stateful code that interacts with the outside world and
can't easily be threaded automatically.

This, I think is where the predication idea would help because it
would serve to automatically serialize related stateful computations
and allow directed micro-threading of independent ones.

George
--
for email reply remove "/" from address
From: John Thingstad
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <op.twaiq614pqzri1@pandora.upc.no>
På Mon, 30 Jul 2007 18:57:01 +0200, skrev George Neuner  
<·········@comcast.net>:
>
> This, I think is where the predication idea would help because it
> would serve to automatically serialize related stateful computations
> and allow directed micro-threading of independent ones.
>

This sounds interesting. But what exactly are you saying here?
Any URL's?
From: George Neuner
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <6ejua3tmk8kppm22741u0h83b7pv96d0oq@4ax.com>
On Mon, 30 Jul 2007 20:24:32 +0200, "John Thingstad"
<··············@chello.no> wrote:

>På Mon, 30 Jul 2007 18:57:01 +0200, skrev George Neuner  
><·········@comcast.net>:
>>
>> This, I think is where the predication idea would help because it
>> would serve to automatically serialize related stateful computations
>> and allow directed micro-threading of independent ones.
>>
>
>This sounds interesting. But what exactly are you saying here?
>Any URL's?

Unfortunately I don't have any URLs to give you.  If you google
"predicated speculative execution" you'll get a lot of cites for
hardware.  The software model originated from research on massively
parallel dataflow computers in the 80's - specifically it's an
outgrowth from the hardware model known as "asynchronous logic with
registers".  Software dataflow has made a comeback recently but I'm
not aware of anyone currently working along this particular line.

The basic idea is to compile the program using memoized call-by-need
(call-by-name also works).  The resulting value thunks are annotated
with guard predicates specifying the conditions (program path,
required data exists, I/O channel is open, thunk #481 has already
executed, etc.) under which they can be evaluated.  So each thunk has
the general form:

  (when (and P1 P2 P3 ... ) 
     ... )

Conceptually all the thunks are evaluated in parallel, blocked from
executing while their guard predicates remain unsatisfied.  While the
main CPU executes the sequential program, auxiliary processors run a
matching loop comparing predicates against the current program
environment and executing thunks whose conditions are satisfied (as in
a rule-based expert system).  The program environment is updated with
the results, potentially activating other thunks.  A clever
implementation would organize the thunks into a discrimination tree
based on the predicates so that only the potentially ready subset of
thunks need be examined.

Today there are probably better ways to organize the runtime - e.g.,
by having each thunk be aware of its dependents and threading
activations - but pattern match dispatching was the way it was done
back then.  The problems are still how to ensure good distribution of
computation over the available processors, what is the appropriate
size/complexity for creating a thunk, and how to determine the set of
run conditions for each thunk that allows maximum speculative
execution without excessive wasted computations.

George
--
for email reply remove "/" from address
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8popm$40r$1@online.de>
George Neuner schrieb:
> the problem is that the interesting code in most
> programs is stateful code that interacts with the outside world and
> can't easily be threaded automatically.

This is just the way people write code in imperative languages.

For programs written in a pure FPL, the vast majority of running code 
does not interact. (E.g. in a GUI library, a lot of code would be 
devoted to setting up the bitmaps of a drop-down menu, the code that 
finally puts the bitmap on the screen would be just the finishing touch.)

> This, I think is where the predication idea would help because it
> would serve to automatically serialize related stateful computations
> and allow directed micro-threading of independent ones.

This would probably help programs that do mostly I/O, but I don't think 
many applications fall in that area (mostly microcontrollers, I'd say, 
but even these move more bits when computing what to do than when 
actually doing anything).

Regards,
Jo
From: ········@ps.uni-sb.de
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185612549.343718.320020@g4g2000hsf.googlegroups.com>
On 28 Jul., 07:49, George Neuner <·········@comcast.net> wrote:
>
> I want parallelism to be exploited automagically on close coupled
> multiprocessors and I want the speedup to be super-linear whenever
> possible.  Haskell, as it stands now, can't do that because its
> inherent parallelism at any point is limited by the shape of the call
> tree and its run time is always proportional to the depth of the tree.
>
> The model I've come to advocate for close coupled multiprocessing
> (such as multiple core processors) is the inverse of Haskell - for
> lack of a standard term I'm going to call it "predicated speculative
> evaluation" - in which a function is evaluated speculatively as soon
> as its preconditions are satisfied rather than waiting until its
> results are needed.

Don't confuse Haskell's non-strict semantics with laziness. Lazy
evaluation is only one possible implementation strategy. Thanks to
purity, the speculative eager evaluation you describe is another
perfectly valid strategy for implementing Haskell. And in fact, it has
been done:

  http://csg.csail.mit.edu/pubs/haskell.html

Unfortunately, the project seems dead.

- Andreas
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8f0da$jdt$1@online.de>
George Neuner schrieb:
> The difficulty lies in choosing the set of preconditions to allow
> reasonable speculative evaluation along relevant program paths while
> limiting it along irrelevant ones.  Just performing the equivalent of
> a breadth-first search among the ready paths is relatively easy, but
> it wastes cycles and doesn't buy much in terms of enhanced
> performance.  I've been noodling the precondition issue for a while
> and I don't yet have a satisfactory answer.  It seems to be a hard
> problem.

My approach would have been "if there are two subexpressions in an 
expression, fork each expression off in a separate thread". On a 
uniprocessor machine, this will indeed give you just the normal 
evaluation order with an additional overhead for threading, but on a 
multiprocessor machine, this should put any unused CPU to immediate use.

This seems to be exactly what Haskell is doing anyway, so I'm not sure 
what the problem actually is. Surely people don't expect their code to 
go magically faster on a uniprocessor machine by making it 
multithreaded, do they?

Or am I missing something?

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ac6ff6$0$1620$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> My approach would have been "if there are two subexpressions in an
> expression, fork each expression off in a separate thread".

You can do that, and it will run concurrently but it won't be any faster
because you spend all of your time forking.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8ig4k$qtv$1@online.de>
Jon Harrop schrieb:
> Joachim Durchholz wrote:
>> My approach would have been "if there are two subexpressions in an
>> expression, fork each expression off in a separate thread".
> 
> You can do that, and it will run concurrently but it won't be any faster
> because you spend all of your time forking.

Erlang systems routinely multithread massively, a few years ago several 
100,000s on a single-processor machine were not a problem.
Similar observations have been made for Mozart/Oz and Alice.

I think you're letting yourself get deterred by the heavyweight process 
and thread models that are typically available on a Unixoid platform.

Of course, on a single-CPU machine, you'll still get no speedup. You'd 
need a dual-processor machine to get anything out of such a regime.
And the interesting question would be to ask when to multithread and 
when to compute serially. And who's to decide that: the programmer, the 
compiler, or the run-time system. (I'm not sure which of these has the 
best data for that decision.)

Regards,
Jo
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xbqdvjduf.fsf@ruckus.brouhaha.com>
Joachim Durchholz <··@durchholz.org> writes:
> Erlang systems routinely multithread massively, a few years ago
> several 100,000s on a single-processor machine were not a problem.
> Similar observations have been made for Mozart/Oz and Alice.

This still doesn't sound so great from a cache hit rate perspective.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8k7vc$mtd$1@online.de>
Paul Rubin schrieb:
> Joachim Durchholz <··@durchholz.org> writes:
>> Erlang systems routinely multithread massively, a few years ago
>> several 100,000s on a single-processor machine were not a problem.
>> Similar observations have been made for Mozart/Oz and Alice.
> 
> This still doesn't sound so great from a cache hit rate perspective.

I quoted these numbers just to demonstrate that multithreading overhead 
need not be as large as many people think.

Regards,
Jo
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185781421.735930.98980@l70g2000hse.googlegroups.com>
On Jul 29, 6:44 pm, Joachim Durchholz <····@durchholz.org> wrote:
> Jon Harrop schrieb:
>
> > Joachim Durchholz wrote:
> >> My approach would have been "if there are two subexpressions in an
> >> expression, fork each expression off in a separate thread".
>
> > You can do that, and it will run concurrently but it won't be any faster
> > because you spend all of your time forking.
>
> Erlang systems routinely multithread massively, a few years ago several
> 100,000s on a single-processor machine were not a problem.

I used to fear Erlang, but not anymore, as actors could be done via a
library, like with Scala. http://lamp.epfl.ch/~phaller/actors.html Lisp
still doesn't have a production-ready actor library, but that's because
nobody needs such massive multithreading yet, except maybe for showing
off. I wanted to see if I could implement it myself, but then I understood
that the current thread system is fine for now and I could spend my time
on things that are more important to me.

> Similar observations have been made for Mozart/Oz and Alice.
As far as I know, Mozart doesn't support SMP. Also don't forget Gambit, which
implements actor model and termite  http://www.iro.umontreal.ca/~germaing/termite.pdf
>
> I think you're letting yourself get deterred by the heavyweight process
> and thread models that are typically available on a Unixoid platform.
>
> Of course, on a single-CPU machine, you'll still get no speedup. You'd
> need a dual-processor machine to get anything out of such a regime.
> And the interesting question would be to ask when to multithread and
> when to compute serially. And who's to decide that: the programmer, the
> compiler, or the run-time system. (I'm not sure which of these has the
> best data for that decision.)
>
> Regards,
> Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ad2b3e$0$1610$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> Erlang systems routinely multithread massively, a few years ago several
> 100,000s on a single-processor machine were not a problem.
> Similar observations have been made for Mozart/Oz and Alice.

For CPU-intensive computations on cyclic data structures?

> I think you're letting yourself get deterred by the heavyweight process
> and thread models that are typically available on a Unixoid platform.

I think you're letting yourself get deterred by the light weight of Erlang's
threads whilst neglecting the sacrifices made by Erlang's design that
degrade performance elsewhere.

> Of course, on a single-CPU machine, you'll still get no speedup.

I believe you will see massive performance degradation for many important
applications.

> You'd need a dual-processor machine to get anything out of such a regime.

I believe you will need far more than two cores/CPUs before this approach to
concurrency becomes ubiquitously preferable for CPU-intensive work.

> And the interesting question would be to ask when to multithread and
> when to compute serially. And who's to decide that: the programmer, the
> compiler, or the run-time system. (I'm not sure which of these has the
> best data for that decision.)

Exactly. I think the programmer must be able to do this themselves because
there is no way you can possibly get the compiler to infer a good answer in
all cases.

Incidentally, you may be interested in the forthcoming support for
concurrency in F#, which draws upon Erlang and Haskell.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Chris Smith
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <MPG.2116dc01862d3119898d0@news.altopia.net>
Jon Harrop <···@ffconsultancy.com> wrote:
> Incidentally, you may be interested in the forthcoming support for
> concurrency in F#, which draws upon Erlang and Haskell.
> 

What does it look like.  Given the players, I imagine it has to do with 
transactional memory?  That would be nice.

-- 
Chris Smith
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8k448$ijp$1@online.de>
(I'm dropping on massive multiple processes - Jon is correct but missing 
the topic, and I don't have the time to answer that right now.)

Jon Harrop schrieb:
> Joachim Durchholz wrote:
>> And the interesting question would be to ask when to multithread and
>> when to compute serially. And who's to decide that: the programmer, the
>> compiler, or the run-time system. (I'm not sure which of these has the
>> best data for that decision.)
> 
> Exactly. I think the programmer must be able to do this themselves because
> there is no way you can possibly get the compiler to infer a good answer in
> all cases.

If the compiler had to give a good answer in all cases, we'd all still 
be programming in assembler. It would suffice if it gives a good answer 
in most cases, and can be led to a better answer in those cases where 
the compiler's default answer isn't good enough.

Regards,
Jo
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185812487.746995.221990@j4g2000prf.googlegroups.com>
On Jul 27, 10:49 pm, George Neuner <·········@comcast.net> wrote:
> I want parallelism to be exploited automagically on close coupled
> multiprocessors and I want the speedup to be super-linear whenever
> possible.

Cool.  If that technique never results in sublinear speedup, we can
use it on a uniprocessor to get faster code.  (Ignoring things like
cache effects, super-linear speedup can only come from different
execution order, which can be implemented on a uniprocessor.)

-andy
From: George Neuner
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <qahsa3djlcv3hf4k0bhfe7tpgh0b897qph@4ax.com>
On Mon, 30 Jul 2007 09:21:27 -0700, Andy Freeman
<······@earthlink.net> wrote:

>On Jul 27, 10:49 pm, George Neuner <·········@comcast.net> wrote:
>> I want parallelism to be exploited automagically on close coupled
>> multiprocessors and I want the speedup to be super-linear whenever
>> possible.
>
>Cool.  If that technique never results in sublinear speedup, we can
>use it on a uniprocessor to get faster code.  (Ignoring things like
>cache effects, super-linear speedup can only come from different
>execution order, which can be implemented on a uniprocessor.)
>
>-andy

Unfortunately, that is not possible.  Any mechanism to partition the
code and join the results introduces sufficient overhead that it will
be slower than equivalent sequential code when run on a single
processor.  The trick in parallelization is to achieve linear (or
better) speedup using the least number of additional processors.

I suppose there is a question as to where threaded cores fit into the
picture.  They can be either faster or slower than sequential cores
depending on the inherent instruction level parallelism, how they
dispatch functional units and how they share (or don't) the primary
cache.

George
--
for email reply remove "/" from address
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <95y7gy1cf8.fsf@hod.lan.m-e-leypold.de>
> George Neuner wrote:
>> It's true that complete laziness is not desirable, but the functional
>> call-by-need model (or at least some variant of it) seems to hold the
>> most promise for automagically exploiting parallelization on
>> multiprocessors.
>
> The evidence I have seen does not agree with that:

Well, that's the meaning of "promise": It's not (quite?) here yet. The
promise is well founded, though, because it is easier to analyze data
flow in a call-by-need model and distribute work on processor
automatically and with a fine granularity. As I understand the promise
:-).

> 1. GHC does have some experimental support for such things. Threads are
> spawned aggressively so they must use lightweight threads. The lightweight

I do not think we're talking about explicit threading when
discussing the promise of CBN for SMP. I'm not sure, though.

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46adcac9$0$1594$ed2619ec@ptn-nntp-reader02.plus.net>
Markus E.L. wrote:
>> George Neuner wrote:
>>> It's true that complete laziness is not desirable, but the functional
>>> call-by-need model (or at least some variant of it) seems to hold the
>>> most promise for automagically exploiting parallelization on
>>> multiprocessors.
>>
>> The evidence I have seen does not agree with that:
> 
> Well, that's the meaning of "promise": It's not (quite?) here yet. The
> promise is well founded, though, because it is easier to analyze data
> flow in a call-by-need model and distribute work on processor
> automatically and with a fine granularity. As I understand the promise
> :-).

True.

>> 1. GHC does have some experimental support for such things. Threads are
>> spawned aggressively so they must use lightweight threads. The
>> lightweight
> 
> I do not think we're are talking about explicit threading when
> discussing the promise of CBN for SMP. I'm not sure, though.

As I understand it, that was not explicit threading.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ngmyxi3qot.fsf@hod.lan.m-e-leypold.de>
> And I have seen Haskellers fight the language to get non-lazy
> evaluation.
>

I've also seen MLers and Lispers and Schemers writing lots of
additional notation to get lazy evaluation. The question mostly is,
which should be the default (lazy or eager) and what are the
respective tradeoffs of these decisions (say purity + possibility of
eq. reasoning vs. a general impact on efficiency or something like
that).

A single observation has no power of proof in this regard.


Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ab841f$0$1590$ed2619ec@ptn-nntp-reader02.plus.net>
Markus E.L. wrote:
> A single observation has no power of proof in this regard.

I have another interesting observation. In eager languages, the programmer
controls evaluation order to make a program efficient. In Haskell,
programmers write declaratively and often optimize by randomly fiddling
until the current version of GHC happens to reduce it in an asymptotically
more efficient way.

To put that another way, you have to try quite hard to make OCaml-, SML-,
Lisp-, or Scheme-specific algorithmic optimizations but, in Haskell, you
can easily make algorithmic optimizations that are specific to GHC 6.6.1.

This is reflected in the fact that memory consumption often explodes in a
totally unpredictable way in Haskell.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8d13e$gsm$1@online.de>
Tamas Papp schrieb:
> Joachim Durchholz <··@durchholz.org> writes:
> 
>> Lisp could thrive with predefined operator precedence plus
>> indentation-is-parentheses (plus parentheses for those cases where you
>> still need them), have 90% less parentheses, 30% more code per page,
>> and *still* be simple to process macrologically.
> 
> It has been done -- would you claim that Dylan is thriving?

If Dylan doesn't thrive, that might have any number of reasons.
(Including unavailability of editors that can properly block-indent, 
NIY syndrome, and a couple of others.)

>>> making it an argument is done by the compiler.
>>> (+ 1 2 3) instead of (+ (list 1 2 3))
>> OK, currying cannot handle that case; the Haskell equivalent of the
>> above would be
>>   plus [1 2 3]
>>
>> However, such lists are needed only in a few places in a program. I
>> don't think that this case warrants special attention to make it easy
>> - 
>> not if it constrains the design space elsewhere.
> 
> Whether you use them in a few places or a lot depends on your
> programming style, which is shaped by the languages you have used in
> the past.  I would be more careful with making claims like this one.

Well, what I have seen here and elsewhere surely had a *lot* of parentheses.

>>>> Code transformation is done by the compiler in Haskell.
>>>> Abstraction (i.e. new languages) is daily staple in Haskell. Sure,
>>>> Haskell embedded languages cannot do fancy syntax, but if you're
>>>> fine with frugal syntax, you define your embedded language simply
>>>> as a set of HOFs to use.
>>> You can do the same in Lisp by making things lazy.
>> Ah, but laziness cannot be efficiently implemented in Lisp.
> 
> Why?  What is "efficient" enough for you?  Lazyness for part of your
> program is easy to implement, and is pretty efficient.  Of course it
> will not be as efficient as Haskell, where lazyness is integrated into
> the whole language.

Exactly.

> However, lazyness is not the holy grail,

Nor do I think it is.
I was just responding to the claim that "you can do the same in Lisp by 
making things lazy". I don't think that would be practical due to the 
overhead.
Hey, it's even difficult to get it efficient enough for day-to-day use 
in Haskell. Haskell run-time systems are specifically geared towards 
lazy evaluation (Google for "tagless spineless machine"), and Haskell 
compilers do a *lot* of strictness analysis to get rid of lazy 
evaluation wherever possible, which is massively aided by static typing.

Nothing of this is possible in Lisp (some of it not even desirable to 
keep the language useful for other goals).
So I conclude that "making things lazy to get it to work" isn't really 
an option - unless that laziness is controlled tightly and applied 
sparingly, that is, but that's not exactly how you'd do an embedded 
domain-specific language.

Note that this all is beside the point anyway. You don't need lazy 
evaluation to chain up HOFs, that's possible in a strict language as 
well (and hence in Lisp, too).
Sorry for not noticing the bigger context when reading the "by making 
things lazy" sentence.

 > it makes
> a lot of sense if you can write your algorithm more simply and a lot
> of the stuff doesn't get evaluated, but it can be a curse if you know
> that 99% of operations you code need to be evaluated anyway.  Most of
> the time, it doesn't make a whole lot of difference in performance.
> And I have seen Haskellers fight the language to get non-lazy
> evaluation.

I have seen people in other languages fight strictness (but, of course, 
attributing the problems to other things because everybody accepts 
strictness as a given). For example, everybody is accepting tons of 
contortions when all that you want to write down is "forall natural 
numbers, do so-and-so until condition this-and-that arises". Easy with 
laziness, contortious with strictness because you can't map the "forall 
natural numbers" part to a language construct directly - but people 
don't see it as a contortion, they just see it as how things are (if 
they think about it at all).

To do a real comparison, you'd probably need to train nonprogrammers 
both approaches in parallel.

Regards,
Jo
From: George Neuner
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <gojka31tm80c7q3m7jj3fb5sae8ab94i86@4ax.com>
On Fri, 27 Jul 2007 16:57:03 +0200, Joachim Durchholz
<··@durchholz.org> wrote:

>Tamas Papp schrieb:
>> Joachim Durchholz <··@durchholz.org> writes:
>> 
>>> Lisp could thrive with predefined operator precedence plus
>>> indentation-is-parentheses (plus parentheses for those cases where you
>>> still need them), have 90% less parentheses, 30% more code per page,
>>> and *still* be simple to process macrologically.
>> 
>> It has been done -- would you claim that Dylan is thriving?
>
>If Dylan doesn't thrive, that might have any number of reasons.
>(Including unavailability of editors that can't properly block-indent, 
>NIY syndrome, and a couple of others.)

Actually Dylan had many supporters back when it used s-exprs.  Support
began to fall away when the operator precedence syntax was introduced.
At first programmers could use either syntax, but most users preferred
s-exprs and nearly everyone stopped using it when the s-expr syntax
was dropped (a move pushed by salespeople who didn't understand their
importance to Dylan's acceptance).

George
--
for email reply remove "/" from address
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8dsil$i1c$1@online.de>
George Neuner schrieb:
> 
> Actually Dylan had many supporters back when it used s-exprs.  Support
> began to fall away when the operator precedence syntax was introduced.
> At first programmers could use either syntax, but most users preferred
> s-exprs and nearly everyone stopped using it when the s-expr syntax
> was dropped (a move pushed by salespeople who didn't understand their
> importance to Dylan's acceptance).

Strange, strange, strange.
Not that salespeople did dumb things (that's normal).
I'm a bit surprised that people have preferred the S-expressions. Did 
anybody give reasons? Was the drop in support due to changes to the 
syntax, or was it just a correlation?

Regards,
Jo
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5gvernF3il7gnU1@mid.individual.net>
Joachim Durchholz wrote:
> George Neuner schrieb:
>>
>> Actually Dylan had many supporters back when it used s-exprs.  Support
>> began to fall away when the operator precedence syntax was introduced.
>> At first programmers could use either syntax, but most users preferred
>> s-exprs and nearly everyone stopped using it when the s-expr syntax
>> was dropped (a move pushed by salespeople who didn't understand their
>> importance to Dylan's acceptance).
> 
> Strange, strange, strange.
> Not that salespeople did dumb things (that's normal).
> I'm a bit surprised that people have preferred the S-expressions. Did 
> anybody give reasons? Was the drop in support due to changes to the 
> syntax, or was it just a correlation?

I am not surprised at all. Apparently, the main target audience 
effectively were Lispers, and Lispers who have considerable experience 
with s-expressions typically prefer them over more mainstream syntaxes.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8gb9e$peb$1@aioe.org>
Pascal Costanza escreveu:
> Joachim Durchholz wrote:
>> George Neuner schrieb:
>>>
>>> Actually Dylan had many supporters back when it used s-exprs.  Support
>>> began to fall away when the operator precedence syntax was introduced.
>>> At first programmers could use either syntax, but most users preferred
>>> s-exprs and nearly everyone stopped using it when the s-expr syntax
>>> was dropped (a move pushed by salespeople who didn't understand their
>>> importance to Dylan's acceptance).
>>
>> Strange, strange, strange.
>> Not that salespeople did dumb things (that's normal).
>> I'm a bit surprised that people have preferred the S-expressions. Did 
>> anybody give reasons? Was the drop in support due to changes to the 
>> syntax, or was it just a correlation?
> 
> I am not surprised at all. Apparently, the main target audience 
> effectively were Lispers, and Lispers who have considerably experience 
> with s-expressions typically prefer them over more mainstream syntaxes.
> 
> 
Pascal,

I think you got the point that's being treated as fuzzy: people who 
have learnt Lisp well enough don't see the s-exprs (or their physical 
consequence, the parentheses) as a problem at all.

Attempts to 'fix' this 'issue' bring too many disadvantages to the new 
dialect, so that it survives only in a very few cases (as a niche 
language, perhaps).

my .0199999...

--
Cesar Rabak
From: Alan Crowe
Subject: Re: shootout: implementing an interpreter for a simple procedural  language Minim
Date: 
Message-ID: <86odhw6bxm.fsf@cawtech.freeserve.co.uk>
Joachim Durchholz <··@durchholz.org> writes:

> I'm a bit surprised that people have preferred the S-expressions. Did 
> anybody give reasons?

I think we confuse ourselves when we frame the issue using
the phrase "preferred the S-expressions." I've seen old LISP
documents that wrote LISP code in filled paragraphs. It was
horrible.

Programmers evolved layout conventions that work well with
S-expressions.

For example

        2xyz
 ----------------
        2    2
   3 ( x  + y )

becomes

(/ (* 2 x y z)
   3 (+ (expt x 2) (expt y 2)))

COND clauses could be written as run-on sentences (cond
((test for this)(do this))((test for that)(do something
else))((test number three)(third action))((final test)(catch
all case)).

We don't do that. We lay it out in a two dimensional
rule-action table.

(cond
  ((test for this) (do this))
  ((test for that) (do something else))
  ((test number three) (third action))
  ((final test) (catch all case)))

My point is that both ways are S-expressions, but the Lisp
community makes a sharp distinction. Newbies are always
being told to indent their code properly, using the tools
built in to their editor. We see the first way as bad and
the second way as good. "S-expressions" straddles the
good/bad boundary. Asking people about their preferences,
using a boundary straddling term that includes what they
energetically reject gets the discussion off to an
irretrievably bad start.

Alan Crowe
Edinburgh
Scotland
From: Christopher Browne
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <604pjl6g0h.fsf@dba2.int.libertyrms.com>
Joachim Durchholz <··@durchholz.org> writes:
> George Neuner schrieb:
>> Actually Dylan had many supporters back when it used s-exprs.
>> Support began to fall away when the operator precedence syntax was
>> introduced.  At first programmers could use either syntax, but most
>> users preferred s-exprs and nearly everyone stopped using it when
>> the s-expr syntax was dropped (a move pushed by salespeople who
>> didn't understand their importance to Dylan's acceptance).
>
> Strange, strange, strange.
> Not that salespeople did dumb things (that's normal).
> I'm a bit surprised that people have preferred the S-expressions. Did
> anybody give reasons? Was the drop in support due to changes to the
> syntax, or was it just a correlation?

In a way, it's a "correlation."

Consider that the team that created Dylan had a rather Lispy bias
(David Moon?), and the initial version of the language was Lispy to
the point of having S-expr syntax.

So it falls out *obviously* that the initial adopters of Dylan were
generally Lisp-friendly (e.g. - willing or happy to use S-exprs).

The shift to a more Algol-like syntax was *certain* to lead to these
users being less happy with Dylan; the hope had been that the move to
a "more traditional" syntax (e.g. - something looking more like Algol
/ Pascal / C / C++) would attract more users.

I suppose they failed to recognize that it is a *severe* mistake to
push away early adopters.
-- 
let name="cbbrowne" and tld="linuxfinances.info" in String.concat ·@" [name;tld];;
http://linuxdatabases.info/info/internet.html
"Have you ever seen anything like that before?"
"Not while I've been a legal state of mind."
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185193403.401214.255820@w3g2000hsg.googlegroups.com>
On Jul 23, 10:07 am, Joachim Durchholz <····@durchholz.org> wrote:
> André Thieme schrieb:
>
> > Counting lines makes not much sense for Lisp. Although it supports all
> > these programming paradigms it has a very unique style which will blow
> > up the LOC count in several cases. But from this it doesn't follow, that
> > coding takes longer.
>
> > This one liner:  (defun make-accumulator (n) (lambda (i) (incf n i)))
> > gets usually written in three visible lines:
> > (defun make-accumulator (n)
> >   (lambda (i)
> >     (incf n i)))
>
> There are two answers to that:
>
> 1. Coding doesn't take longer, but you can't place the same amount of
> code on a screenful, so debugging and maintenance will take longer.
> Note that your typical generic FPL not only fits on a line, it even
> takes less of a line; the syntactic Haskell equivalent of the above
> example would look like this:
>    make-accumulator N = incf N
> (No, Haskell isn't cheating, it simply doesn't have or need macros and
> quoting, so it can encode the same code with far less symbols.)
> Now that's 27 instead of 52 characters, which means I can put nearly
> double the code on a single line without cramming it.
> (I'd expect OCaml to be slightly more verbose. Jon?)
>
> 2. You can always count nodes in the AST instead of lines of code. For
> the above example, you'd end up at roughly the same figures for Lisp and
> your generic FPL, but as soon as you declare macros in Lisp, the FPL
> needs less nodes.
> (There may be other effects. Jon?)
>
> Regards,
> Jo

This turned into a pissing contest, so why doesn't this discussion go
solely on comp.lang.functional without crossposting, so it will be seen
only by those people who care whether language x is *better* than language y
because it has the biggest, I mean smallest, line count.
From: Tamas Papp
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87lkd78rcf.fsf@pu100877.student.princeton.edu>
Joachim Durchholz <··@durchholz.org> writes:

> (No, Haskell isn't cheating, it simply doesn't have or need macros and
> quoting, so it can encode the same code with far less symbols.)

Doesn't have?  Yes.  Doesn't need?  People who started Liskell or
Template Haskell would probably disagree.

Tamas
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a49b64$0$1609$ed2619ec@ptn-nntp-reader02.plus.net>
Tamas Papp wrote:
> Doesn't have?  Yes.  Doesn't need?  People who started Liskell or
> Template Haskell would probably disagree.

Sure, but the vast majority of OCaml and Haskell coders who could use their
excellent macro systems choose not to.

I do not doubt that Lisp's macros are extremely useful for Lisp programmers
but I would contest any generalization that macros are necessary for
programming or that all languages should have macro systems, which is an
opinion often put forward on c.l.l.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E Leypold
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <d2wswrdy8h.fsf@hod.lan.m-e-leypold.de>
> I do not doubt that Lisp's macros are extremely useful for Lisp programmers
> but I would contest any generalization that macros are necessary for
> programming or that all languages should have macro systems, which is an
> opinion often put forward on c.l.l.

ACK.

Regards -- Markus
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5gk8pvF3gep3fU1@mid.individual.net>
Jon Harrop wrote:
> Tamas Papp wrote:
>> Doesn't have?  Yes.  Doesn't need?  People who started Liskell or
>> Template Haskell would probably disagree.
> 
> Sure, but the vast majority of OCaml and Haskell coders who could use their
> excellent macro systems choose not to.
> 
> I do not doubt that Lisp's macros are extremely useful for Lisp programmers
> but I would contest any generalization that macros are necessary for
> programming or that all languages should have macro systems, which is an
> opinion often put forward on c.l.l.

...or you just misunderstand them.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a4a563$0$1632$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> There are two answers to that:
> 
> 1. Coding doesn't take longer,

Even if there is four times as much code?

> but you can't place the same amount of 
> code on a screenful, so debugging and maintenance will take longer.

Yes. I would expect that to result in superlinear degradation of development
speed with respect to LOC.

> Note that your typical generic FPL not only fits on a line, it even
> takes less of a line; the syntactic Haskell equivalent of the above
> example would look like this:
>    make-accumulator N = incf N

A literal translation:

# (fun (+) n i -> n := !n + i);;
- : ('a -> 'b -> 'a) -> 'a ref -> 'b -> unit = <fun>

A useful translation to count the number of elements in a list:

# let length list = List.fold_left (fun n _ -> n+1) 0 list;;
val length : 'a list -> int = <fun>

To count the number of elements in any container:

# let length fold seq = fold (fun n _ -> n+1) 0 seq;;
val length : ((int -> 'a -> int) -> int -> 'b -> 'c) -> 'b -> 'c = <fun>

To sum the int elements in any container:

# let sum fold seq = fold (+) 0 seq;;
val sum : ((int -> int -> int) -> int -> 'a -> 'b) -> 'a -> 'b = <fun>

To sum elements of any type in any container:

# let sum add zero fold seq = fold add zero seq;;
val sum : 'a -> 'b -> ('a -> 'b -> 'c -> 'd) -> 'c -> 'd = <fun>

and so on. In practice, this code would never see the light of day because
real accumulators have too little in common to make such factoring useful.
For example, if you want to sum the elements of a floating point vector you
would take their magnitude into account to avoid unnecessary numerical
errors. If you want to "accumulate" strings (i.e. a StringBuilder) you
would amortize appends to reduce complexity from quadratic to linear.

> 2. You can always count nodes in the AST instead of lines of code.

Lisp's verbosity stems primarily from its use of whitespace and parentheses
as well as a lack of pattern matching. You can see this in almost any
comparable programs written in the two languages (or any languages with the
similar features, e.g. Haskell vs Scheme). Look at the intersect routines
from my ray tracer. First the Lisp:

(defun intersect (orig dir scene)
  (labels ((aux (lam normal scene)
             (let* ((center (sphere-center scene))
                    (lamt (ray-sphere orig
                                      dir
                                      center
                                      (sphere-radius scene))))
               (if (>= lamt lam)
                   (values lam normal)
                   (etypecase scene
                     (group
                      (dolist (kid (group-children scene))
                        (setf (values lam normal)
                              (aux lam normal kid)))
                      (values lam normal))
                     (sphere
                      (values lamt (unitise
                                    (-v (+v orig (*v lamt dir)) center)))))))))
    (aux infinity zero scene)))

Then the OCaml:

let rec intersect o d (l, _ as hit) (c, r, s) =
  let l' = ray_sphere o d c s in
  if l' >= l then hit else match s with
    [] -> l', unitise (o +| l' *| d -| c)
  | ss -> List.fold_left (intersect o d) hit ss

Look at the core interpreters in this Minim shootout. First the OCaml:

let rec test = function
  | `Comp(c, x, y) -> c !x !y
  | `And(f, g) -> test f && test g
  | `Or(f, g) -> test f || test g
  | `Not f -> not(test f)

let rec statement pc = function
  | `Assign(x, y) -> x := !y; pc + 1
  | `Incr x -> incr x; pc + 1
  | `Decr x -> decr x; pc + 1
  | `If(p, t, f) -> statement pc (if test p then t else f)
  | `Goto tag -> !tag
  | `PrintString s -> print_string s; pc + 1
  | `Print x -> print_int(!x); pc + 1
  | `Input x -> x := int_of_string(input_line stdin); pc + 1

let rec run program pc = run program (statement pc program.(pc))

Then the Qi:

(define run
   {program --> env}
    Program -> (run-loop Program Program []))

(define run-loop
   {program --> program --> env --> env}
    [] _ Env -> Env
    [nl | Ss] Program Env -> (do (output "~%") (run-loop Ss Program
Env))
    [Tag | Ss] Program Env -> (run-loop Ss Program Env) where (symbol?
Tag)
    [[goto Tag] | _] Program Env -> (run-loop (go Tag Program) Program
Env)
    [[Var is Val] | Ss] Program Env
    -> (run-loop Ss Program (change-env Var (compute-val Val Env)
Env))
    [[++ Var] | Ss] Program Env
    -> (run-loop Ss Program (change-env Var (+ 1 (look-up Var Env))
Env))
    [[-- Var] | Ss] Program Env
    -> (run-loop Ss Program (change-env Var (- (look-up Var Env) 1)
Env))
    [[if Test then DoThis else DoThat] | Ss] Program Env
     -> (if (perform-test? Test Env)
            (run-loop [DoThis | Ss] Program Env)
            (run-loop [DoThat | Ss] Program Env))
    [[print M] | Ss] Program Env -> (do (output "~A" (look-up M Env))
                                        (run-loop Ss Program Env))
                                                                               
where (symbol? M)
    [[print M] | Ss] Program Env -> (do (output "~A" M)
                                        (run-loop Ss Program Env))
    [[input Var] | Ss] Program Env
       -> (run-loop Ss Program (change-env Var (input+ : number)
Env)))

(define compute-val
  {val --> env --> number}
   N _ -> N     where (number? N)
   Var Env -> (look-up Var Env) where (symbol? Var))

(define go
  {symbol --> program --> program}
   Tag [Tag | Program] -> Program
   Tag [_ | Program] -> (go Tag Program)
   Tag _ -> (error "cannot go to tag ~A~%" Tag))

(define perform-test?
  {test --> env --> boolean}
   [Test1 and Test2] Env -> (and (perform-test? Test1 Env)
                                 (perform-test? Test2 Env))
   [Test1 or Test2] Env -> (or (perform-test? Test1 Env)
                               (perform-test? Test2 Env))
   [not Test] Env -> (not (perform-test? Test Env))
   [V1 = V2] Env -> (= (compute-val V1 Env) (compute-val V2 Env))
   [V1 > V2] Env -> (> (compute-val V1 Env) (compute-val V2 Env))
   [V1 < V2] Env -> (< (compute-val V1 Env) (compute-val V2 Env)))

(define change-env
   {symbol --> number --> env --> env}
    Var Val [] -> [(@p Var Val)]
    Var Val [(@p Var _) | Env] -> [(@p Var Val) | Env]
    Var Val [Binding | Env] -> [Binding | (change-env Var Val Env)])

(define look-up
  {symbol --> env --> number}
   Var [] -> (error "~A is unbound.~%" Var)
   Var [(@p Var Val) | _] -> Val
   Var [_ | Env] -> (look-up Var Env))

> For 
> the above example, you'd end up at roughly the same figures for Lisp and
> your generic FPL, but as soon as you declare macros in Lisp, the FPL
> needs less nodes.

Are you saying that macros reduce code size?

> (There may be other effects. Jon?)

Pattern matching is the single biggest advantage and is the main reason why
OCaml, SML, Haskell and F# are all much more concise than Common Lisp. Look
at the amount of code doing destructuring in the above examples.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a4d02f$0$1594$ed2619ec@ptn-nntp-reader02.plus.net>
David Golden wrote:
> Jon Harrop wrote:
>> Lisp's verbosity stems primarily from its use of whitespace and
>> parentheses as well as a lack of pattern matching.
> 
> Of course, if a lisp coder really wants pattern matching, they just
> load a pattern matcher / unifier.

Greenspun. If you put a couple of years into it you'll be where Mark is now
with Qi. If you work really hard for another 30 years you might get to
where OCaml, Haskell or F# are now.

> Actually, much of the shortening is coming from simply using
> shorter or single-character identifiers in your ML code where Lisp (or
> apparently Qi) authors would use more meaningful ones - hardly a
> sensible comparison.

On the contrary, when trying to compare brevity I see no merit in ignoring
verbosity.

> One can write Qi or Lisp with single-character 
> identifiers. It's just not considered particularly good style. 

It also makes little difference (see below).

> I can rewrite a pattern match from Mark's Qi into something a little
> more like your ML in style:
> 
>  [[if Test then DoThis else DoThat] | Ss] Program Env
>      -> (if (perform-test? Test Env)
>             (run-loop [DoThis | Ss] Program Env)
>             (run-loop [DoThat | Ss] Program Env))
> 
> ===>
> 
> [[if X then A else B] | Ss] P E ->
>         (if (pt X E) (rl [A | Ss] P E) (rl [B | Ss] P E))
> 
> Oh look, I've "halved" the length (if you're measuring LOC, which is a
> ridiculous measure anyway for most languages, not just lisp or ML). Of
> course it's also got less readable, like your (but not all) ML.

To be fair, I am sure you will want to take the OCaml:

  `If(p, t, f) -> statement pc (if test p then t else f)

and perform equivalent compression:

  `If(p, f, g) -> s n (if t p then f else g)

As you can see, the compressed OCaml is significantly shorter than the
compressed Qi. The only useful conclusion we can draw from this is that the
length of identifiers was not, in fact, relevant.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a50cab$0$1625$ed2619ec@ptn-nntp-reader02.plus.net>
David Golden wrote:
> You conflate another issue: your ML "if" match line is not doing the
> same thing as Mark's Qi "if" match line (hint: when/if you work out why
> it's not, you'll also work out why it's both unsurprising and
> uninteresting your ML was faster than Mark's Qi interpreter).

Sounds like another triumph of hope over reality.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a5b770$0$1618$ed2619ec@ptn-nntp-reader02.plus.net>
David Golden wrote:
> Jonnie wrote:
>> Sounds like another triumph of hope over reality
> 
> I don't think anyone's gonna buy that, Jonnie boy.
> The Qi and ML programs as a whole may have similar
> effects, but hey, I could drive or skate to
> the shops too.

So you're saying that the comparison is grossly unfair in some unspecified
way. I don't suppose you could be more specific?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a613b6$0$1610$ed2619ec@ptn-nntp-reader02.plus.net>
David Golden wrote:
> Jon Harrop wrote:
>> So you're saying that the comparison is grossly unfair in some
>> unspecified way.
> 
> No, I was saying your programs are different.  You're desperately
> trying to change the subject - you (invalidly) tried to compare
> the length of your if match line and mark's, when it is clear
> that they are not doing the same thing.

What exactly do you believe they are doing differently?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Dan Bensen
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f838p4$h1j$1@wildfire.prairienet.org>
Cesar Rabak wrote:
> Jon Harrop escreveu:
>> Pattern matching is the single biggest advantage and is the main 
>> reason why OCaml, SML, Haskell and F# are all much more concise 
>> than Common Lisp. 
> Humm... I still find your comparison loaded: you rule out the use of 
> libraries for pattern matching in Lisp. Why?

Because it sells books.

-- 
Dan
www.prairienet.org/~dsb/
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f83hpj$3d2$1@aioe.org>
Dan Bensen escreveu:
> Cesar Rabak wrote:
>> Jon Harrop escreveu:
>>> Pattern matching is the single biggest advantage and is the main 
>>> reason why OCaml, SML, Haskell and F# are all much more concise than 
>>> Common Lisp. 
>> Humm... I still find your comparison loaded: you rule out the use of 
>> libraries for pattern matching in Lisp. Why?
> 
> Because it sells books.
> 
I see. . .
From: Markus E Leypold
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <9jk5sqrudg.fsf@hod.lan.m-e-leypold.de>
> Dan Bensen escreveu:
>> Cesar Rabak wrote:
>>> Jon Harrop escreveu:
>>>> Pattern matching is the single biggest advantage and is the main
>>>> reason why OCaml, SML, Haskell and F# are all much more concise
>>>> than Common Lisp.
>>> Humm... I still find your comparison loaded: you rule out the use
>>> of libraries for pattern matching in Lisp. Why?
>> Because it sells books.
>>
> I see. . .

Actually I don't see: First I do not understand "Because it sells
books" and second, should that really refer to the fact that Jon has
written a book (and selling it for money), it still got me confused:

 1. Jon didn't hype his book in this thread.
 
 2. That someone is working in a given subject area X and actually is
    making money from it -- is that disqualifying him from making
    useful and true statements on usenet? As opposed to all the people
    around with no history in area X and no success?

I'm puzzled.

Regards -- Markus
From: Dan Bensen
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f88nqg$anv$1@wildfire.prairienet.org>
 >>>> Jon Harrop escreveu:
 >>>>> Pattern matching ... is the main reason why OCaml, SML,
 >>>>> Haskell and F# are all much more concise than Common Lisp.

 >>> Cesar Rabak wrote:
 >>>> I still find your comparison loaded: you rule out the use
 >>>> of libraries for pattern matching in Lisp. Why?

 >> Dan Bensen escreveu:
 >>> Because it sells books.

Markus E Leypold wrote:
 > Actually I don't see:
 >  1. Jon didn't hype his book in this thread.

First of all, Marcus, Jon's signature does advertise his book, so
he has directly advertised his book in this thread simply by posting
to it.  As for "hyping", it's not necessary to promote the book
directly.  It may be enough to draw programmers toward OCaml by making
comparisons that are unfairly biased against Lisp, which Jon has been
doing repeatedly.

 >  2. That someone is working in a given subject area X and actually is
 >     making money from it -- is that disqualifying him from making
 >     useful and true statements on usenet?

Markus, do you consider it "true and useful" to say that pattern-
matching libraries are "Greenspunning"?  Do you think ML languages
are "much more concise" than Lisp?  I don't think those statements
are either true or useful.  I don't think libraries and simple macros
are Greenspunning, and I don't think the ML family is "much" more
concise than Lisp when you take macros into account.

-- 
Dan
www.prairienet.org/~dsb/
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a850de$0$1616$ed2619ec@ptn-nntp-reader02.plus.net>
Dan Bensen wrote:
> First of all, Marcus, Jon's signature does advertise his book, so
> he has directly advertised his book in this thread simply by posting
> to it.  As for "hyping", it's not necessary to promote the book
> directly.  It may be enough to draw programmers toward OCaml by making
> comparisons that are unfairly biased against Lisp, which Jon has been
> doing repeatedly.

Are you saying that Mark is conspiring to produce benchmarks that show Lisp
in a bad light?

> Markus, do you consider it "true and useful" to say that pattern-
> matching libraries are "Greenspunning"?  Do you think ML languages
> are "much more concise" than Lisp?  I don't think those statements
> are either true or useful.  I don't think libraries and simple macros
> are Greenspunning, and I don't think the ML family is "much" more
> concise than Lisp when you take macros into account.

Even though both languages have macros?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <aqbqdztde8.fsf@hod.lan.m-e-leypold.de>
Dan,

>  >>>> Jon Harrop escreveu:
>  >>>>> Pattern matching ... is the main reason why OCaml, SML,
>  >>>>> Haskell and F# are all much more concise than Common Lisp.
>
>  >>> Cesar Rabak wrote:
>  >>>> I still find your comparison loaded: you rule out the use
>  >>>> of libraries for pattern matching in Lisp. Why?
>
>  >> Dan Bensen escreveu:
>  >>> Because it sells books.
>
> Markus E Leypold wrote:
>  > Actually I don't see:
>  >  1. Jon didn't hype his book in this thread.
>
> First of all, Marcus, Jon's signature does advertise his book, so
> he has directly advertised his book in this thread simply by posting
> to it. 

Oh I see. How bad. If you follow my mail domain, you'll also find a
business web site (now somewhat outdated). Does that make me a
spammer too? And do you click on every link in signatures? I really
don't see how the word "hype" would apply to posting a link in a
signature. And what about www.prairienet.org/~dsb/? What about people
having their company name in the sig?

> As for "hyping", it's not necessary to promote the book
> directly.  It may be enough to draw programmers toward OCaml by making
> comparisons that are unfairly biased against Lisp, which Jon has been
> doing repeatedly.

And pulling them to himself just by alienating them to Lisp. Don't be
ridiculous. Jon is not the only OCaml developer around.

>  >  2. That someone is working in a given subject area X and actually is
>  >     making money from it -- is that disqualifying him from making
>  >     useful and true statements on usenet?
>
> Markus, do you consider it "true and useful" to say that pattern-
> matching libraries are "Greenspunning"?  Do you think ML languages

Dan, actually I considered that as something that happens when the
discussion has already deteriorated enough - which certainly wasn't Jon's
doing alone. My dictionary doesn't contain the word "greenspunning" as
a verb, but I assume that Jon referred to "Greenspun's Tenth Rule"
which is, given the context, ironic, but, I think expresses very well,
why Jon disdains pattern matching libraries in Lisp and thinks that
pattern matching belongs into the core language. So indeed the hint
'Greenspun' does communicate the argument very concisely.

> are "much more concise" than Lisp?  

Unfortunately I'm programming in ML languages rather than Lisp,
preferably in OCaml. So ... -- I'm certainly biased. Regarding
pattern matching -- Yes, I think ML (and Haskell pattern matching is
more concise than anything of that kind I have seen in Lisp so far,
and certainly more than nesting of conditional statements. In ML it
also ties in rather nicely with the type system: In many cases the
compiler will warn you about forgotten cases -- something I imagine
the Lisp systems must have difficulties with owing to the dynamic type
system.

> I don't think those statements are either true or useful.

> I don't think libraries and simple macros are Greenspunning, and I

I wonder what you think "greenspunning" actually is?

> don't think the ML family is "much" more concise than Lisp when you
> take macros into account.

You believe that. Other people believe differently. How can you reach
agreement? Certainly not by ad hominem attacks and attacking straw men
(what, at the end of the day, the argument "he sells book" actually
is. And if I consider further, trying to contradict my lack of
understanding of this argument by constructing a case against JH by
some other route ("Greenspunning", "It's wrong than ML is more
concise") is also a strawman, because it makes the "he sells books"
argument not any more valid, not one iota).

Regards -- Markus

(Who thinks that some of the participants in this slug fest should
better their logic by reading e.g. here:
http://www.nizkor.org/features/fallacies)
From: Dan Bensen
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8bue6$et6$1@wildfire.prairienet.org>
 > Regards -- Markus (Who think's that some of the participants in
 > this slug fest should better their logic by reading e.g. here:
 > http://www.nizkor.org/features/fallacies)

Markus, I've already KF'd you, because I find your posts misleading
and argumentative.  But since you went so far as to cite logical
fallacies, I'll describe the fallacies that you've been committing.
Your main problem seems to be an inability to address comments
without distorting them, which is classic trolling behavior.

 >> Markus E Leypold wrote:
 >>  > Actually I don't see:
 >>  >  1. Jon didn't hype his book in this thread.
 >>
 >> First of all, Marcus, Jon's signature does advertise his book, so
 >> he has directly advertised his book in this thread simply by posting
 >> to it.

Markus E.L. wrote:
 > Oh I see. How bad.

Straw man.
http://www.nizkor.org/features/fallacies/straw-man.html

I never said advertising was bad.  I only suggested that your
statement was inaccurate, because Jon hypes OCaml and advertises
his book.

 >> As for "hyping", it's not necessary to promote the book directly.
 >> It may be enough to draw programmers toward OCaml by making
 >> comparisons that are unfairly biased against Lisp, which Jon
 >> has been doing repeatedly.

 > And pulling them to himself just by alienating them to Lisp.
 > Don't be ridiculous. Jon is not the only OCaml developer around.

Red herring.
http://www.nizkor.org/features/fallacies/red-herring.html

The question is not whether other OCaml developers post to the usenet,
but whether Jon has been trolling.  He has been the only developer
regularly making comparisons that are unfairly biased against Lisp.

 >> Markus, do you consider it "true and useful" to say that
 >> pattern-matching libraries are "Greenspunning"?

 > I think expresses very well, why Jon distains pattern matching
 > libraries in Lisp and thinks that pattern matching belongs into
 > the core language.  So indeed the hint 'Greenspun' does communicate
 > the argument very concisly.

Red herring.

The question is not whether the term expresses a sentiment, but
whether the sentiment is warranted.  I don't think a simple library
counts as "Greenspunning".

 >> Do you think ML languages are "much more concise" than Lisp?

 > Unfortunately I'm programming in ML languages rather than Lisp,
 > preferrably in OCaml. So ... -- I'm certainly biased. Regarding
 > pattern matching -- Yes, I think ML (and Haskell pattern matching is
 > more concise than anything of that kind I have seen in Lisp so far,
 > and certainly more than nesting of conditional statements.

Red herring.

We're not talking about the conciseness of pattern matching alone.
We're talking about the overall conciseness of each language,
based on pattern matching, macros, and all other features.

 >> I don't think libraries and simple macros are Greenspunning
 > I wonder what you think "greenspunning" actually is?

Regardless of its exact definition, it's a derogatory term that
suggests an inefficient, unmaintainable, unnecessary implementation
of features built into another language.  Macros make Lisp pattern-
matching libraries both efficient and maintainable, and ML's lack
of macros makes these libraries necessary if one wants both macros
and pattern matching.

 >> I don't think the ML family is "much" more concise than Lisp
 >> when you take macros into account.

 > You believe that. Other people believe differently. How can you reach
 > agreement? Certainly not by ad hominem attacks and attacking straw men
 > (what, at the end of the day, the argument "he sells book" actually
 > is.

Straw man.

No-one objects to selling books.  Only to trolling.

 > And if I consider further, trying to contradict my lack of
 > understanding of this argument

I don't contradict your lack of understanding at all, Markus.
I totally agree that you lack a basic understanding of Jon's
trolling and what other people have been saying to you.

 > by constructing a case against JH by some other route
 > ("Greenspunning", "It's wrong than ML is more concise")
 > is also a strawman, because it makes the "he sells books"
 > argument not any more valid, not one iota).

Obvious straw man, made even worse by appealing to the straw-man
fallacy yourself.  It's Jon who's making the Greenspunning arguments,
and no-one objects to selling books.  I think you're another troll.

-- 
Dan
www.prairienet.org/~dsb/
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185528951.181897.25890@k79g2000hse.googlegroups.com>
On Jul 27, 7:11 am, Dan Bensen <··········@cyberspace.net> wrote:
>  > Regards -- Markus (Who think's that some of the participants in
>  > this slug fest should better their logic by reading e.g. here:
>  >http://www.nizkor.org/features/fallacies)
>
> Markus, I've already KF'd you, because I find your posts misleading
> and argumentative.  But since you went so far as to cite logical
> fallacies, I'll describe the fallacies that you've been commiting.
> Your main problem seems to be an inability to address comments
> without distorting them, which is classic trolling behavior.
>
>  >> Markus E Leypold wrote:
>  >>  > Actually I don't see:
>  >>  >  1. Jon didn't hype his book in this thread.
>  >>
>  >> First of all, Marcus, Jon's signature does advertise his book, so
>  >> he has directly advertised his book in this thread simply by posting
>  >> to it.
>
> Markus E.L. wrote:
>
>  > Oh I see. How bad.
>
> Straw man.http://www.nizkor.org/features/fallacies/straw-man.html
>
> I never said advertising was bad.  I only suggested that your
> statement was inaccurate, because Jon hypes OCaml and advertises
> his book.
>
>  >> As for "hyping", it's not necessary to promote the book directly.
>  >> It may be enough to draw programmers toward OCaml by making
>  >> comparisons that are unfairly biased against Lisp, which Jon
>  >> has been doing repeatedly.
>
>  > And pulling them to himself just by alienating them to Lisp.
>  > Don't be ridiculous. Jon is not the only OCaml developer around.
>
> Red herring.http://www.nizkor.org/features/fallacies/red-herring.html
>
> The question is not whether other OCaml developers post to the usenet,
> but whether Jon has been trolling.  He has been the only developer
> regularly making comparisons that are unfairly biased against Lisp.
>
>  >> Markus, do you consider it "true and useful" to say that
>  >> pattern-matching libraries are "Greenspunning"?
>
>  > I think expresses very well, why Jon distains pattern matching
>  > libraries in Lisp and thinks that pattern matching belongs into
>  > the core language.  So indeed the hint 'Greenspun' does communicate
>  > the argument very concisly.
>
> Red herring.
>
> The question is not whether the term expresses a sentiment, but
> whether the sentiment is warranted.  I don't think a simple library
> counts as "Greenspunning".
>
>  >> Do you think ML languages are "much more concise" than Lisp?
>
>  > Unfortunately I'm programming in ML languages rather than Lisp,
>  > preferrably in OCaml. So ... -- I'm certainly biased. Regarding
>  > pattern matching -- Yes, I think ML (and Haskell pattern matching is
>  > more concise than anything of that kind I have seen in Lisp so far,
>  > and certainly more than nesting of conditional statements.
>
> Red herring.
>
> We're not talking about the conciseness of pattern matching alone.
> We're talking about the overall conciseness of each language,
> based on pattern matching, macros, and all other features.
>
>  >> I don't think libraries and simple macros are Greenspunning
>  > I wonder what you think "greenspunning" actually is?
>
> Regardless of its exact definition, it's a derogatory term that
> suggests an inefficient, unmaintainable, unnecessary implementation
> of features built into another language.  Macros make Lisp pattern-
> matching libraries both efficient and maintainable, and ML's lack
> of macros makes these libraries necessary if one wants both macros
> and pattern matching.
>
>  >> I don't think the ML family is "much" more concise than Lisp
>  >> when you take macros into account.
>
>  > You believe that. Other people believe differently. How can you reach
>  > agreement? Certainly not by ad hominem attacks and attacking straw men
>  > (what, at the end of the day, the argument "he sells book" actually
>  > is.
>
> Straw man.
>
> No-one objects to selling books.  Only to trolling.
>
>  > And if I consider further, trying to contradict my lack of
>  > understanding of this argument
>
> I don't contradict your lack of understanding at all, Markus.
> I totally agree that you lack a basic understanding of Jon's
> trolling and what other people have been saying to you.
>
>  > by constructing a case against JH by some other route
>  > ("Greenspunning", "It's wrong than ML is more concise")
>  > is also a strawman, because it makes the "he sells books"
>  > argument not any more valid, not one iota).
>
> Obvious straw man, made even worse by appealing to the straw-man
> fallacy yourself.  It's Jon who's making the Greenspunning arguments,
> and no-one objects to selling books.  I think you're another troll.

And I think you're right.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a9c727$0$1631$ed2619ec@ptn-nntp-reader02.plus.net>
Dan Bensen wrote:
> I don't think a simple library counts as "Greenspunning".

Yet a Lisper coined the phrase to describe the reinvention of lists in C
code. A good pattern match compiler is thousands of lines of code, compared
to ~10 for a list.

> Macros make Lisp pattern-matching libraries both efficient...

That belief is contrary to all of the quantitative evidence objectively
gathered so far:

http://www.ffconsultancy.com/languages/ray_tracer/
http://www.lambdassociates.org/studies/study10.htm
http://groups.google.co.uk/group/comp.lang.lisp/msg/f47aa6043e79770e?hl=en&

> ML's lack of macros...

camlp4:

http://www.ocaml-tutorial.org/camlp4_3.10

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ivr6mu87oc.fsf@hod.lan.m-e-leypold.de>
>  > Regards -- Markus (Who think's that some of the participants in
>  > this slug fest should better their logic by reading e.g. here:
>  > http://www.nizkor.org/features/fallacies)
>
> Markus, I've already KF'd you, because I find your posts misleading
> and argumentative.  But since you went so far as to cite logical
> fallacies, I'll describe the fallacies that you've been commiting.
> Your main problem seems to be an inability to address comments
> without distorting them, which is classic trolling behavior.
>
>  >> Markus E Leypold wrote:
>  >>  > Actually I don't see:
>  >>  >  1. Jon didn't hype his book in this thread.
>  >>
>  >> First of all, Marcus, Jon's signature does advertise his book, so
>  >> he has directly advertised his book in this thread simply by posting
>  >> to it.
>
> Markus E.L. wrote:
>  > Oh I see. How bad.
>
> Straw man.
> http://www.nizkor.org/features/fallacies/straw-man.html
>
> I never said advertising was bad.  I only suggested that your
> statement was inaccurate, because Jon hypes OCaml and advertises
> his book.
>
>  >> As for "hyping", it's not necessary to promote the book directly.
>  >> It may be enough to draw programmers toward OCaml by making
>  >> comparisons that are unfairly biased against Lisp, which Jon
>  >> has been doing repeatedly.
>
>  > And pulling them to himself just by alienating them to Lisp.
>  > Don't be ridiculous. Jon is not the only OCaml developer around.
>
> Red herring.
> http://www.nizkor.org/features/fallacies/red-herring.html
>
> The question is not whether other OCaml developers post to the usenet,
> but whether Jon has been trolling.  He has been the only developer
> regularly making comparisons that are unfairly biased against Lisp.

It's the "unfair" actually that's under discussion. And I don't think
I've to defend my arguments against the allegation of them being
fallacies: I freely admit they are rhetoric, not more. To actually
refute your "Jon is a troll" argument, it suffices to point out that
neither "he sells books" nor "he has a link to his book in his
signature" do constitute conclusive arguments in support of your
proposition.

I don't think I need to say more and the reference to
nizkor.org/features/fallacies doesn't wash, since we are not
discussing any more: Since when would I answer to non-arguments with
logic instead of just saying "step back, don't be ridiculous -- don't
you realize how far-fetched your attempts to construct a case are?".

If you _really_ want to continue your "Jon is a troll" argument (and
from the responses I get as compared to the responses on Lisp
benchmarking in this thread I get the impression that perhaps more
people on c.l.l. are interested in branding JH as a troll than in
Lisp) -- if suggest we start again from the beginning: State your
proposition and than gather evidence in support. All that "he has
written a book", "he sells a book", "he has a link in his signature",
"somewhere [no reference!] he even admitted ..." is absolutely
inconclusive, not good for anything and to me suspiciously sounds like
a group of people is desperately groping for evidence to convict
somebody they consider already guilty.

If you all need kangaroo courts in c.l.l. to keep the quiet and free
of controversial posting: By all means continue. Without my further
participation of course, since I've already said most of what I had to
say.

On the other side I see an enforcement problem (c.l.l. is not
moderated AFAIK) and suggest you consider that -- assuming for the
moment, Jon really were a troll -- that the best cure against troll
has been so far to ignore them (see Xah Lee :-). Obviously there is no
sufficient majority for that in c.l.l which should give you an idea
how trollish JH probably is.

>  >> Markus, do you consider it "true and useful" to say that
>  >> pattern-matching libraries are "Greenspunning"?
>
>  > I think it expresses very well why Jon disdains pattern matching
>  > libraries in Lisp and thinks that pattern matching belongs into
>  > the core language.  So indeed the hint 'Greenspun' does communicate
>  > the argument very concisely.
>
> Red herring.

I'm tempted to say <insult> here: I just explained to you, that I
don't know 'Greenspun' as a verb, so I'm not sure what it really means
-- and I explain. Of course I now get accused of "Red Herring". My
bad. Shouldn't explain.

> The question is not whether the term expresses a sentiment, but
> whether the sentiment is warranted.  I don't think a simple library
> counts as "Greenspunning".

Well, but obviously it's not so clear cut: The library provides
features, other FPs have already in the core language. Jon argued that
Greenspun's 10th rule applies. You argue "it doesn't count as ..." --
an argument where I still perceive that you take "greenspunning" as
negative or a kind of insult. Admittedly I don't know what to make of
it. Is the hint to Greenspuns 10th rule "wrong" in any way or
"useless"? Or what is the argument you're trying to build: AFAI
understood it was something on the lines of: Jon is a troll. To prove
that you have to disprove my statement that Jons contributions are
"true and useful". So you take one reply and try to prove it's
nonsense, wrong or insulting (I'm not sure what). I think you can see the
fallacy there. I don't see where that should lead us.

>
>  >> Do you think ML languages are "much more concise" than Lisp?
>
>  > Unfortunately I'm programming in ML languages rather than Lisp,
>  > preferrably in OCaml. So ... -- I'm certainly biased. Regarding
>  > pattern matching -- Yes, I think ML (and Haskell pattern matching is
>  > more concise than anything of that kind I have seen in Lisp so far,
>  > and certainly more than nesting of conditional statements.
>
> Red herring.

> We're not talking about the conciseness of pattern matching alone.
> We're talking about the overall conciseness of each language,
> based on pattern matching, macros, and all other features.

Not in this subthread. And not me with you, certainly. Jon's arguments
are useful and interesting, whether you like it or not, whereas your
shouting "Red Herring" at every partial statement is not. Asking me,
whether "I find ... <whatever>" how that relates to the question what
Jon is, and I'm to bored and lazy now to look up THAT special fallacy,
but be assured: It is one: At least you're changing the topic.

Decide: Do you want to discuss Lisp with me? That was not how we
started and I've indicated that I'm probably not qualified and too
biased to be useful as an opponent regarding this question. I've never
decided against Lisp and for OCaml: It's just OCaml is good enough for
me, I don't have problem with the syntax, it is fast enough for me, it
has a useful type system and I met it at the time I needed it. There
you are. 

Or: Do you want to discuss about "Jon is a troll"? I alread said I
don't experience him like this and cannot see it so clear cut that his
statements are untrue or not useful. The example you brought so far is
unsuitable to discuss that point, because it's not clear wether it's
right or wrong and one example of a false or useless opinion or
statement wouldn't even support that a person is troll that is only
posting useless spam on usenet. Do we really need to continue this
topic? Your logic is flawed and that's it (and you're confusing the
question wether Jon is right with the question wether Jon is a troll:
Somthing you're not alone with: Other contributors to that thread
already have argued in the spirit of "I think he is wrong, so he is a
troll").


| > Humm... I still find your comparison loaded: you rule out the use of
| > libraries for pattern matching in Lisp. Why?

>
>  >> I don't think libraries and simple macros are Greenspunning
>  > I wonder what you think "greenspunning" actually is?

> Regardless of its exact definition, it's a derogatory term that
> suggests an inefficient, unmaintainable, unnecessary implementation
> of features built into another language.  

Well -- and how efficient IS pattern matching in Lisp? And if people
start to build their programming around pattern matching, don't you
think it makes a difference. I also noticed when quoting you
conveniently snipped the following piece of text from my last posting:

| In ML it also ties in rather nicely with the type system: In many
| cases the compiler will warn you about forgotten cases -- something
| I imagine the Lisp systems must have difficulties with owing to the
| dynamic type system.

> Macros make Lisp pattern-
> matching libraries both efficient and maintainable, and ML's lack
> of macros makes these libraries necessary if one wants both macros
> and pattern matching.

I don't see how ML's lack of macros makes any libraries in Lisp
necessary.

>
>  >> I don't think the ML family is "much" more concise than Lisp
>  >> when you take macros into account.
>
>  > You believe that. Other people believe differently. How can you reach
>  > agreement? Certainly not by ad hominem attacks and attacking straw men
>  > (what, at the end of the day, the argument "he sells book" actually
>  > is.

>
> Straw man.

No, it's not. It addresses a big issue in this thread: How people
discuss with each other and how they reach agreement: Some obviously
have decided that

   "why do you ..." "because he sells books" 

is a valid argument of some kind. It isn't and it won't ever become
(at least in a reasonably rational community).

> No-one objects to selling books.  Only to trolling.

Which again is proven by "selling books"? Dan, if you got a quiet
moment, just go back to the post where I came into this thread (that
with the "because he sells books" quote). 

I'm not arguing pro/contra Lisp/OCaml or whatever. As I said: If I
think about it, I'm not qualified to do that and the arguments I hear
(_on the topic_, mind you) are not so obviously wrong or fallacious
that I could decide or see instantaneously that a civilized dispute is
unnecessary.

But I'm objecting to the way some people here communicate with others
-- and that includes you and this, your, post


>  > And if I consider further, trying to contradict my lack of
>  > understanding of this argument

> I don't contradict your lack of understanding at all, Markus.
> I totally agree that you lack a basic understanding of Jon's
> trolling and what other people have been saying to you.

Well, I see. I think that proves my point. Because I don't just take
your word for it (whatever), I'm bad and probably stupid (my "lack [of]
basic understanding of [...]  what other people have been saying to
you). Are you (are the others that have been trying to tell me) a
usenet god? Or doesn't the usually rule, that if you want somebody to
think you thought you have to convince them, also apply to you?

And what's more: My impression is that the Jon-Harrop-Opposition at
c.l.l. is a rather vocal minority with a lot of poison to spent. And
that IS my impression. You and the others will have to live with that,
because you just can't take it away.

>  > by constructing a case against JH by some other route
>  > ("Greenspunning", "It's wrong than ML is more concise")
>  > is also a strawman, because it makes the "he sells books"
>  > argument not any more valid, not one iota).
>
> Obvious straw man, made even worse by appealing to the straw-man
> fallacy yourself.  

Well, I regret, I don't see the straw man.

> It's Jon who's making the Greenspunning arguments,
> and no-one objects to selling books.  

No, no one. There is no wall, there are no wardens, there is no
prison, you're not here ... 

BTW: Did I say it was you making the "Greenspunning argument"
(whatever evil that might be, it certainly sounds mightily depraved)?

> I think you're another troll.

Aaah. The old "I don't like you, you're a troll" manoeuvre. A usenet
classic, indeed. Nice to meet you.

Yes, please plonk me and refrain from responding. :-)

I've spent too much time on this post anyway. Can't answer you at so
much length in future, but the basic front line should be clear (and
the basic strategies also have been tested, like yours of sprinkling
"Red Herring" and "Straw man" liberally into selective quoting of my
post -- perhaps I can even continue the discussion with myself in this
vein)

(Personal I think I'm fighting against kangaroo courts and for free
speech, but that is an aside for other readers only.)

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a94e5e$0$1595$ed2619ec@ptn-nntp-reader02.plus.net>
Markus E.L. wrote:
> You believe that. Other people believe differently. How can you reach
> agreement?

You can do objective and quantitative tests by writing equivalent programs
in different languages and measuring their verbosity, e.g. the ray tracer,
the symbolic simplifier, the Minim interpreter.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <fw64469pyz.fsf@hod.lan.m-e-leypold.de>
> Markus E.L. wrote:
>> You believe that. Other people believe differently. How can you reach
>> agreement?
>
> You can do objective and quantitative tests by writing equivalent programs
> in different languages and measuring their verbosity, e.g. the ray tracer,
> the symbolic simplifier, the Minim interpreter.

I think you got the message :-). I don't think those test provide
really hard irrefutable data, but at least some material for further
discussion and pondering. What I find so repugnant in many language
advocacy discussion how people already just know what is true and
right and so few ever are willing to go into formulating meaningful
questions and considering them in a more scientific spirit (meaning,
actually looking for answers instead of knowing them and trying to
convince the rest of the world. In my opinion that is what
distinguishes science from politics, not wether one got a research
grant or a PHD :-)).

Regards -- Markus
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <uGeqi.21073$j7.379606@news.indigo.ie>
> Who think's that some of the participants in this slug fest should
> better their logic by reading e.g.

Here's one that's particularly relevant:
http://www.lambdassociates.org/fallacy.htm
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5ny7h28bal.fsf@hod.lan.m-e-leypold.de>
>> Who think's that some of the participants in this slug fest should
>> better their logic by reading e.g.
>
> Here's one that's particularly relevant:
> http://www.lambdassociates.org/fallacy.htm

To the discussion as a whole: yes. To the particular fallacy I've been critising: No.

Regards -- Markus
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8cm87$ub7$1@aioe.org>
Markus E.L. escreveu:
> Dan,
[snipped]

> Regards -- Markus
> 
> (Who think's that some of the participants in this slug fest should
> better their logic by reading e.g. here:
> http://www.nizkor.org/features/fallacies)
> 
Markus,

It is interesting that you yourself sent this link. It seems that bored people could
easily make a table matching each of your recent posts against one of the
fallacies described in your reference.
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <bhy7h259ic.fsf@hod.lan.m-e-leypold.de>
> Markus E.L. escreveu:
>> Dan,
> [snipped]
>
>> Regards -- Markus
>> (Who think's that some of the participants in this slug fest should
>> better their logic by reading e.g. here:
>> http://www.nizkor.org/features/fallacies)
>>
> Markus,
>
> It interesting yourself sent this link. It seems that bored people
> could easily maje a table for each of your recent post agains one of
> the fallacies described in your reference.

If you think so. But I think that would rather indicate that you
didn't get my point (which was not (a) participating in the Lisp
vs. OCaml discussion nor in disproving arguments regarding "Jon is a
Spammer" already made, but rather pointing out bad manners and that
there are no arguments (or at least there were no arguments, so there
is no disproving them)).

Regards -- Markus
From: Nicolas Neuss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87ps2effzp.fsf@ma-patru.mathematik.uni-karlsruhe.de>
·····································@ANDTHATm-e-leypold.de (Markus E.L.) writes:

> Dan,
> 
> >  >>>> Jon Harrop escreveu:
> >  >>>>> Pattern matching ... is the main reason why OCaml, SML,
> >  >>>>> Haskell and F# are all much more concise than Common Lisp.
> >
> >  >>> Cesar Rabak wrote:
> >  >>>> I still find your comparison loaded: you rule out the use
> >  >>>> of libraries for pattern matching in Lisp. Why?
> >
> >  >> Dan Bensen escreveu:
> >  >>> Because it sells books.
> >
> > Markus E Leypold wrote:
> >  > Actually I don't see:
> >  >  1. Jon didn't hype his book in this thread.
> >
> > First of all, Marcus, Jon's signature does advertise his book, so
> > he has directly advertised his book in this thread simply by posting
> > to it. 
> 
> Oh I see. How bad. If you follow my mail domain, you'll also find a
> business web site (now somewhat out dated). Does that make me a spammer
> too? And do you click on every link in signatures? I really don't see how
> the word "hype" would apply to posting a link in a signature. And what
> about www.prairienet.org/~dsb/? What about people having their their
> company name in the sig?

The sig is only a problem because of his other behaviour.  As I said
before, his posts probably fit better for comp.lang.functional.  On the
other hand, I am reading comp.lang.lisp (no followups set there, so I
probably won't read answers to this message).  Maybe you can understand
that I do not want to hear the continued promotion of OCaml and F# in a
lisp newsgroup.  (And, BTW, it is often a very brain-dead promotion,
because the guy deliberately refuses to understand other people arguments.)

Nicolas
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <pomyxi874e.fsf@hod.lan.m-e-leypold.de>
> ·····································@ANDTHATm-e-leypold.de (Markus E.L.) writes:
>
>> Dan,
>> 
>> >  >>>> Jon Harrop escreveu:
>> >  >>>>> Pattern matching ... is the main reason why OCaml, SML,
>> >  >>>>> Haskell and F# are all much more concise than Common Lisp.
>> >
>> >  >>> Cesar Rabak wrote:
>> >  >>>> I still find your comparison loaded: you rule out the use
>> >  >>>> of libraries for pattern matching in Lisp. Why?
>> >
>> >  >> Dan Bensen escreveu:
>> >  >>> Because it sells books.
>> >
>> > Markus E Leypold wrote:
>> >  > Actually I don't see:
>> >  >  1. Jon didn't hype his book in this thread.
>> >
>> > First of all, Marcus, Jon's signature does advertise his book, so
>> > he has directly advertised his book in this thread simply by posting
>> > to it. 
>> 
>> Oh I see. How bad. If you follow my mail domain, you'll also find a
>> business web site (now somewhat out dated). Does that make me a spammer
>> too? And do you click on every link in signatures? I really don't see how
>> the word "hype" would apply to posting a link in a signature. And what
>> about www.prairienet.org/~dsb/? What about people having their their
>> company name in the sig?
>
> The sig is only a problem because of his other behaviour.  As I said

Behaviour? Perhaps just write to his teacher ... :-/. People, grow
up. This is usenet: The only way to enforce is not to respond. Since
c.l.l. obviously can't ignore JH's posts the problem AND there are
people who don't want them, this is a c.l.l. problem. And you will
have to live with it. And perhaps better not attack other people too,
who happen to defend Jon. And, last, to go back to the beginning of
these threads: No reason to bring your venom to c.l.f.

> before, his posts probably fit better for comp.lang.functional.  On the

Maybe. On the other side (without researching what Jon actually
posted), I find it somehow doubtful that discussion of limits and
weaknesses of lisp won't have a place on c.l.l. In comparison:
Certainly I don't want to see discussions on the (supposed or real)
weaknesses of GC and FP take place only at c.l.ada (they are biased),
but rather in c.l.f.

> other hand, I am reading comp.lang.lisp (no followups set there, so I
> probably won't read answers to this message).  

My rule is: You post accusations / judgements on people on group X,
you read my answer there. I've been ignoring the follow-ups in this
threads consistently, especially if people post from and to c.l.l. and
set f'up to c.l.f. That is the curse of crossposting.

Call me a troll. :-) You wouldn't be the first one.

> Maybe you can understand
> that I do not want to hear the continued promotion of OCaml and F# in a
> lisp newsgroup.  (And, BTW, it is often a very brain-dead promotion,
> because the guy deliberately refuses to understand other people arguments.)

My impression is, that there are two parties that don't want to
understand. I have seen enough instances of JH listening to arguements
that I wonder why the "problem" is so prevalent in c.l.l.

Regards -- Markus 

     ( Whom the style of the JH opposition alone already has convinced
       that the problem is not JH alone. )
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8cqe5$f7g$1@aioe.org>
Markus E.L. escreveu:
>> ·····································@ANDTHATm-e-leypold.de (Markus E.L.) writes:
[snipped]

> 
> Behaviour? Perhaps just write to his teacher ... :-/. People, grow
> up. This is usenet: The only way to enforce is not to respond. Since
> c.l.l. obviously can't ignore JH's posts the problem AND there are
> people who don't want them, this is a c.l.l. problem. And you will
> have to live with it. And perhaps better not attack other people too,
> who happen to defend Jon. And, last, to go back to the beginning of
> these threads: No reason to bring your venom to c.l.f.

Hey man! How inconsistent are you willing to go? Your argument: "People,
grow up. This is usenet:" _But_ if people respond to Jon's crossposted
posts you childishly complain: "No reason to bring your venom to c.l.f."

What's the e-address of the teacher of yours?

> 
>> before, his posts probably fit better for comp.lang.functional.  On the
> 
> Maybe. On the other side (without researching what Jon actaully
> posted), I find it somehow doubtful that discussion of limits and

Which, brings to the kernel of reason people starts to find evidence 
your trolling as well. . .

[snipped]

> Call me a troll. :-) You wouldn't be the first one.

So your behaviour is recurrent...

> 
>> Maybe you can understand
>> that I do not want to hear the continued promotion of OCaml and F# in a
>> lisp newsgroup.  (And, BTW, it is often a very brain-dead promotion,
>> because the guy deliberately refuses to understand other people arguments.)
> 
> My impression is, that there are two parties that don't want to
> understand. I have seen enough instances of JH listening to arguements
> that I wonder why the "problem" is so prevalent in c.l.l.

JH interest in 'listening' arguments to c.l.l is another very loaded 
assertion of yours. He's fishing here all the rebuttals c.l.l 
gratuitously offer him. What uses he makes of them is OT in this discussion.
From: Markus E.L.
Subject: Trolling, Request to shift the thread to c.l.f. completely: Was: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ahvec63r42.fsf_-_@hod.lan.m-e-leypold.de>
> Markus E.L. escreveu:
>>> ·····································@ANDTHATm-e-leypold.de (Markus E.L.) writes:
> [snipped]
>
>> Behaviour? Perhaps just write to his teacher ... :-/. People, grow
>> up. This is usenet: The only way to enforce is not to respond. Since
>> c.l.l. obviously can't ignore JH's posts the problem AND there are
>> people who don't want them, this is a c.l.l. problem. And you will
>> have to live with it. And perhaps better not attack other people too,
>> who happen to defend Jon. And, last, to go back to the beginning of
>> these threads: No reason to bring your venom to c.l.f.
>
> Hey man! How inconsistent are willing to go? Your argument: "People,
> grow up. This is usenet:" _But_ if people respond to Jon's crossposted
> posts you childly complain: "No reason to bring your venom to c.l.f."
>
> What's the e-address of the teacher of yours?

Want to learn the technique? :-).

>>> before, his posts probably fit better for comp.lang.functional.  On the
>> Maybe. On the other side (without researching what Jon actaully
>> posted), I find it somehow doubtful that discussion of limits and
>
> Which, brings to the kernel of reason people starts to find evidence
> your trolling as well. . .

Yes, I can see that. C.l.l. is for glorifying Lisp. Nothing else. And
people who find that strange, are trolls. 

> [snipped]
>
>> Call me a troll. :-) You wouldn't be the first one.
>
> So your behaviour is recurrent...

Only rarely and only with an audience lacking insight or self
critique. Never mind. if you and your companions don't understand the
problem you yourself are (probably even to yourself and your own
goals), I can't help it. But commiseration from my side is rather
limited: If Jon is trolling (which I'm not interested to judge) you
certainly seem to earn it.

:-)

>>> Maybe you can understand
>>> that I do not want to hear the continued promotion of OCaml and F# in a
>>> lisp newsgroup.  (And, BTW, it is often a very brain-dead promotion,
>>> because the guy deliberately refuses to understand other people arguments.)
>> My impression is, that there are two parties that don't want to
>> understand. I have seen enough instances of JH listening to arguements
>> that I wonder why the "problem" is so prevalent in c.l.l.
>
> JH interest in 'listening' arguments to c.l.l is another very loaded
> assertion of yours. He's fishing here all the rebuttals c.l.l
> gratuitously offer him. What uses he makes of them is OT in this
> discussion.

Is that assessment based on irrefutable facts or just on
_interpretation_?

Some of you people from the beat-JH-faction are rather
unrelaxed. Instead of ignoring him (if he was a troll that would be
your only practical option) you just insist on the rest of the world
sharing your assessment. And then you go to war with the people who
don't. I don't think that will really yield the intended result,
always supposing the goal is peace and harmony in c.l.l. and not war
against <whomever>.

But never mind: You are welcome to your community. Jon, in my opinion
is not a spammer (is not outside c.l.l.) but certainly a fool trying
to talk to such a self contained community as c.l.l. seems to be: The
effort is ill spent, and it's hardly thinkable that the resulting
flame wars will get him any customers there (if that should be the
purpose), quite the opposite.

Jon: As far as I'm concerned, You're always welcome to compare Lisp
against whatever at c.l.f. I think the various differences between
implementations of FPLs and the tradeoffs (historical or necessary)
are on topic in c.l.f. Im not speaking for the whole of c.l.f.

My request to Mark Tarver: Wouldn't the benchmarking rather belong to
c.l.f instead of being cross posted to both groups? It totally belongs
to c.l.f but Lispers might take offence at the presence of other
languages in the benchmark and perhaps also any discussions resulting
from that. On the other side interested parties can always subscribe
to c.l.f which hasn't got so much traffic.

Thanks for listening to all concerned.

Regards -- Markus
From: Jon Harrop
Subject: Re: Trolling, Request to shift the thread to c.l.f. completely: Was: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ab784e$0$1620$ed2619ec@ptn-nntp-reader02.plus.net>
Markus E.L. wrote:
> But never mind: You are welcome to your community. Jon, in my opinion
> is not a spammer (is not outside c.l.l.) but certainly a fool trying
> to talk to such a self contained community as c.l.l. seems to be: The
> effort is ill spent, and it's hardly thinkable that the resulting
> flame wars will get him any customers there (if that should be the
> purpose), quite the opposite.

When I post clarifications I'm not addressing the vocal die-hard Lispers who
struggle with arithmetic, I'm trying to help the newbies who are being
misled.

I'd do the same thing if I saw a child being taught scientific creationism,
for example.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Raffael Cavallaro
Subject: Re: Trolling, Request to shift the thread to c.l.f. completely: Was: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <200707281330268930-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-28 13:00:31 -0400, Jon Harrop <···@ffconsultancy.com> said:

> When I post clarifications I'm not addressing the vocal die-hard Lispers who
> struggle with arithmetic, I'm trying to help the newbies who are being
> misled.

In case there were any reasonable correspondents to this forum who were 
in any doubt as to Jon's purpose in comp.lang.lisp, this post makes it 
quite clear.

He characterizes die hard lispers as people who are so stupid as to 
'struggle' with arithmetic, implicitly suggesting that being so stupid, 
they couldn't possibly be reliable guides to appropriate programming 
tools.

He then goes on to state explicitly that his purpose is to target 
inexperienced correspondents to c.l.l..

Finally, he closes his post with a commercial advertisement for his 
consulting services in ocaml, a language c.l.l. is clearly not about.

So he posts off topic posts with commercial content with the explicit 
purpose of getting naive new readers of the forum to use his commercial 
services. This is the very definition of a spammer.

Jon is a usenet spammer. He can't find enough of an audience on a low 
traffic group that might be appropriate for ocaml such as 
comp.lang.functional, so he posts to c.l.l. He mischaracterizes long 
time lispers as idiots in the hope of deluding newcomers into believing 
that ocaml is some sort of programming nirvana. He suggests that the 
way to this nirvana is through his consulting services. One wonders how 
foolish a newcomer would have to be to fall for this, though having 
said that, thousands of people do click on spam ads for penis enlargers 
every day.
From: Ken Tilton
Subject: Re: Trolling, Request to shift the thread to c.l.f. completely: Was: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <s2Uqi.160$cM5.141@newsfe12.lga>
Jon Harrop wrote:
> When I post clarifications I'm not addressing the vocal die-hard Lispers who
> struggle with arithmetic, I'm trying to help the newbies who are being
> misled.
> 
> I'd do the same thing if I saw a child being taught scientific creationism,
> for example.

You'd tell them about the stork?

kenny

-- 
http://www.theoryyalgebra.com/

"Algebra is the metaphysics of arithmetic." - John Ray

"As long as algebra is taught in school,
there will be prayer in school." - Cokie Roberts

"Stand firm in your refusal to remain conscious during algebra."
    - Fran Lebowitz

"I'm an algebra liar. I figure two good lies make a positive."
    - Tim Allen
From: Nicolas Neuss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87ps2crgeo.fsf@ma-patru.mathematik.uni-karlsruhe.de>
·····································@ANDTHATm-e-leypold.de (Markus E.L.) writes:

> Call me a troll. :-) You wouldn't be the first one.

OK, thanks for making this clear.

Nicolas
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <g1fy362sjq.fsf@hod.lan.m-e-leypold.de>
> ·····································@ANDTHATm-e-leypold.de (Markus E.L.) writes:
>
>> Call me a troll. :-) You wouldn't be the first one.
>
> OK, thanks for making this clear.

You're welcome. The magic of names, you know: It's such a relief to
call people something and so practically change them into something
:-). Makes for less muddled front-lines all around.

Regards -- Markus
From: Ulf Wiger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <xczd4ye8agv.fsf@cbe.ericsson.se>
>>>>> "M.E.L." == Markus E L <·····································@ANDTHATm-e-leypold.de> writes:

  M.E.L.> In many cases the compiler will warn you about forgotten
  M.E.L.> cases -- something I imagine the Lisp systems must have
  M.E.L.> difficulties with owing to the dynamic type system.

In Erlang (which is dynamically typed), the compiler warns about
clauses that can never match, and a companion tool called Dialyzer
(part of Erlang/OTP) does dataflow analysis on programs, 
collecting type information, and warns about all sorts of things,
such as unreachable code, calling a function with incompatible types,
etc. Since the language is dynamic, even Dialyzer can't warn against
forgotten cases -- there is no template from which to judge that,
other than the derived signatures of the call sites. Thus, Dialyzer
can often tell whether a function is calling another using a type
that is not handled.

To illustrate with a simple (but fresh) example, the following
bug report came in a few days ago:

"Dialyzer issues a warning when matching the result of os:version:


   OSVersion = case os:version() of
      {Major, Minor, Release} ->
         lists:flatten(
            io_lib:format("~w.~w.~w",
                          [Major, Minor, Release]));
      VersionString ->
         VersionString
   end,


Dialyzer says on this code:
The variable VersionString can never match since previous
clauses completely covered the type 
{non_neg_integer(),non_neg_integer(),non_neg_integer()}


However, the documentation of os.erl says:
version() -> {Major, Minor, Release} | VersionString

Dialyzer or the documentation has it wrong."

Dialyzer is described e.g. in this document:
http://user.it.uu.se/~kostis/Papers/bugs05.pdf

A companion tool, TypEr is described here:
http://www.erlang.se/workshop/2005/TypEr_Erlang05.pdf

Clearly, one can perform quite advanced type analysis on 
dynamically typed languages.

BR,
Ulf W
-- 
Ulf Wiger, Senior Specialist,
   / / /   Architecture & Design of Carrier-Class Software
  / / /    Team Leader, Software Characteristics
 / / /     Ericsson AB, IMS Gateways
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <q1tzrq6qga.fsf@hod.lan.m-e-leypold.de>
>>>>>> "M.E.L." == Markus E L <·····································@ANDTHATm-e-leypold.de> writes:
>
>   M.E.L.> In many cases the compiler will warn you about forgotten
>   M.E.L.> cases -- something I imagine the Lisp systems must have
>   M.E.L.> difficulties with owing to the dynamic type system.
>
> In Erlang (which is dynamically typed), the compiler warns about
> clauses that can never match, and a companion tool called Dialyzer
> (part of Erlang/OTP) does dataflow analysis on programs, 
> collecting type information, and warns about all sorts of things,
> such as unreachable code, calling a function with incompatible types,
> etc. Since the language is dynamic, even Dialyzer can't warn against
> forgotten cases -- there is no template from which to judge that,
> other than the derived signatures of the call sites. Thus, Dialyzer
> can often tell whether a function is calling another using a type
> that is not handled.

Yes, I thought that a dynamically typed language can get that feature
with a data flow analyzer -- therefore I wrote "difficulties" instead
of "impossible", the choice of words was intentional. 

If I see it right, data flow analysis is tightly related to automatic
type derivation. Indeed I like the idea that one can get an untyped
language and the type systems as external tools to be applied at will
and where needed. I understood Qi to be exactly such a system. The
only problem I see, is, that typing also directs the compiler in
optimization and choosing data representations (or should/does in a
language like ML). I don't see a simple way to do that with
"pluggable" systems, so the runtime would have to be like the runtime
of Scheme and support dynamic types. That is perhaps not a problem, I
don't know, but it's one of the points where I could imagine such a
system to fail / become inefficient. But I might be wrong. There is
certainly much research still to be done and it's a pity that the
adoption of new and better languages in the industry (not your
company, certainly) is so glacial.

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ab773e$0$1620$ed2619ec@ptn-nntp-reader02.plus.net>
Markus E.L. wrote:
> If I see it right, data flow analysis is tightly related to automatic
> type derivation. Indeed I like the idea that one can get an untyped
> language and the type systems as external tools to be applied at will
> and where needed. I understood Qi to be exactly such a system. The
> only problem I see, is, that typing also directs the compiler in
> optimization and choosing data representations (or should/does in a
> language like ML).

Surely it is much easier to remove a static type system than to add one. I
would advocate an approach like .NET, where the run-time has a simple
static type system, giving good performance and conveying some static type
information between languages.

So real users can exploit sophisticated statically-typed languages that ride
on .NET to design spacecraft or cure cancer and the users who struggle with
maths can lash together their shopping trolleys for porn sites using
dynamically typed languages like IronPython.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8ggo3$h6s$1@online.de>
Jon Harrop schrieb:
> Markus E.L. wrote:
>> If I see it right, data flow analysis is tightly related to automatic
>> type derivation. Indeed I like the idea that one can get an untyped
>> language and the type systems as external tools to be applied at will
>> and where needed. I understood Qi to be exactly such a system. The
>> only problem I see, is, that typing also directs the compiler in
>> optimization and choosing data representations (or should/does in a
>> language like ML).
> 
> Surely it is much easier to remove a static type system than to add one.

But you'll get a worse language than when you had started with a 
dynamically typed language. Static typing, like any other feature, 
limits design trade-offs, and you won't lose the bad sides of these 
trade-offs just by dropping the type system.

You'd also have to rewrite portions of the standard libraries to deal 
with the new situation.

It's very similar when going from dynamic to static typing.
I think as soon as you start to study such a transformation in detail, 
the difference in easiness is quickly dwarfed by the size of all 
the things that should be done!

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46af192b$0$1607$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> But you'll get a worse language than when you had started with a
> dynamically typed language. Static typing, like any other feature,
> limits design trade-offs, and you won't lose the bad sides of these
> trade-offs just by dropping the type system.
> 
> You'd also have to rewrite portions of the standard libraries to deal
> with the new situation.
> 
> It's very similar when going from dynamic to static typing.
> I think as soon as you start to study such a transformation in detail,
> the difference in easiness quickly dwarves compared to the size of all
> the things that should be done!

I think this is all completely wrong.

You won't get a "worse language" if you build on top of a statically typed
foundation, you'll get exactly the same language. The inclusion of a static
type system does not limit design trade-offs in the context of implementing
other languages on top of it. In terms of implementation, you just box all
values in an open sum type to circumvent the type system and you have
dynamic typing.

Statically typed libraries are trivial to use from a dynamic language. You
use the static type information to box all values, insert run-time checks
and then you ignore the static type information.

Finally, going from dynamic to static is wildly different. You cannot
recover the performance cost of dynamic typing and adding static typing
involves designing a static type system and implementing a static type
checker, which is a lot of work.

As an example, IronPython is a dynamically typed language that sits on a
statically typed CLR.

I should probably give some code examples as well. The static types
translate into boxed static types like this:

  int -> [ `Int of int ]
  string -> [ `String of string ]

and so on.

Boxing works like this:

# let int n = `Int n;;
val int : 'a -> [> `Int of 'a ] = <fun>

Run-time type checks during unboxing look like this:

# let unint = function `Int n -> n | _ -> invalid_arg "unint";;
val unint : [> `Int of 'a ] -> 'a = <fun>

The Fibonacci function translates from this:

# let rec fib = function
    | 0 | 1 as n -> n
    | n -> fib(n-1) + fib(n-2);;
val fib : int -> int = <fun>

into:

# let ( + ) n m = int(unint n + unint m);;
val ( + ) : [> `Int of int ] -> [> `Int of int ] -> [> `Int of int ] = <fun>
# let ( - ) n m = int(unint n - unint m);;
val ( - ) : [> `Int of int ] -> [> `Int of int ] -> [> `Int of int ] = <fun>
# let rec fib n =
    match unint n with
    | 0 | 1 as n -> int n
    | _ -> fib(n - int 1) + fib(n - int 2);;
val fib : [> `Int of int ] -> [> `Int of int ] = <fun>

For example:

# fib (int 35);;
- : [> `Int of int ] = `Int 9227465

To translate a standard library call like print_int into dynamically typed
form you take its signature:

# truncate;;
- : float -> int = <fun>

You unbox the arguments and box the result:

# let unfloat = function `Float x -> x | _ -> invalid_arg "unfloat";;
val unfloat : [> `Float of 'a ] -> 'a = <fun>

# let truncate x = int(truncate(unfloat x));;
val truncate : [> `Float of float ] -> [> `Int of int ] = <fun>

That's it. As you can see, circumventing OCaml's static type system is much
easier than writing Qi. :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185813946.917543.11410@e9g2000prf.googlegroups.com>
On Jul 28, 9:55 am, Jon Harrop <····@ffconsultancy.com> wrote:
> So real users can exploit sophisticated statically-typed languages that ride
> on .NET to design spacecraft or cure cancer and the users who struggle with
> maths can lash together their shopping trolleys for porn sites using
> dynamically typed languages like IronPython.

Thus demonstrating that Harrop knows nothing about the complexity of
high-volume commerce.

Elitists always are ignorant about the things that they find beneath
them.
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <4d7ioi2rs2.fsf@hod.lan.m-e-leypold.de>
> Markus E.L. wrote:
>> If I see it right, data flow analysis is tightly related to automatic
>> type derivation. Indeed I like the idea that one can get an untyped
>> language and the type systems as external tools to be applied at will
>> and where needed. I understood Qi to be exactly such a system. The
>> only problem I see, is, that typing also directs the compiler in
>> optimization and choosing data representations (or should/does in a
>> language like ML).
>
> Surely it is much easier to remove a static type system than to add one. I

IMHO it's the other way round: Adding a static type system on top of a
language with a dynamic one practically amounts to a certain type of
data flow analysis ("this name can only be bound to int since we
tracked that it is only used as int" :-)). Of course, since the
program is still executed by a VM with a dynamic type system, there is
no gain in efficiency. But the additional type system could be seen as a
proof of correctness WRT to certain aspects.

Regards -- Markus
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural     language Minim
Date: 
Message-ID: <f8cjs8$rr$1@online.de>
Dan Bensen schrieb:
>  >>>> Jon Harrop escreveu:
>  >>>>> Pattern matching ... is the main reason why OCaml, SML,
>  >>>>> Haskell and F# are all much more concise than Common Lisp.
> 
>  >>> Cesar Rabak wrote:
>  >>>> I still find your comparison loaded: you rule out the use
>  >>>> of libraries for pattern matching in Lisp. Why?
> 
>  >> Dan Bensen escreveu:
>  >>> Because it sells books.
> 
> Markus E Leypold wrote:
>  > Actually I don't see:
>  >  1. Jon didn't hype his book in this thread.
> 
> First of all, Marcus, Jon's signature does advertise his book, so
> he has directly advertised his book in this thread simply by posting
> to it.

Now you're exaggerating.
I actually had to look at Jon's last post to even notice that he had a 
URL to his book in the sig.

 > As for "hyping", it's not necessary to promote the book
> directly.  It may be enough to draw programmers toward OCaml by making
> comparisons that are unfairly biased against Lisp, which Jon has been
> doing repeatedly.

I think both sides are guilty of presenting things in a favorable light.

In fact this is a long-standing gripe that I have been having about the 
Lisp community. Disadvantages are discussed far more openly in other 
communities (e.g. the Haskell community is quite open about the 
advantages and disadvantages of laziness).

Not that I don't understand that. If you keep taking unfair flak for 
decades (literally!), such a stance can easily evolve and solidify into 
common consensus.

Oh, and there's another big difference. Most discussions about relative 
advantages and disadvantages evolve into matter-of-belief statements. 
Jon is far more down to facts than that: He's giving benchmarks, making 
himself open to critique; he's adapting the benchmarks in response to 
the critique; he's counting lines, characters, or tokens and arguing his 
choice of criteria rationally.
You can agree or disagree with his findings, but he never asks you to 
believe.
Few Lispers typically argue on that level. I'm not in any Lisp 
community, but those Lispers that I have come across elsewhere have a 
noticeable affinity to belief-based arguments (and are generally taken 
less seriously as a consequence).

If these observations are true and relevant, then Jon must indeed come 
across as a troll, since he doesn't accept the typical Lisper's code of 
conduct. However, I think it's the code of conduct that's more to blame. 
(OK, I'll also admit that Jon can be rather rude and tends to generalize 
his findings a bit more than I'm inclined to follow. I'm not sure 
whether that's his normal tone or was provoked by stubborn disbelief 
reactions from the Lisp community.)

>  >  2. That someone is working in a given subject area X and actually is
>  >     making money from it -- is that disqualifying him from making
>  >     useful and true statements on usenet?
> 
> Markus, do you consider it "true and useful" to say that pattern-
> matching libraries are "Greenspunning"?  Do you think ML languages
> are "much more concise" than Lisp?  I don't think those statements
> are either true or useful.

 From what I know, at least the second statement is true.
I don't know what "Greenspunning" is, but those pattern-matching 
libraries that I have seen weren't very well-designed; also, I doubt 
that a library can reap all the benefits that built-in pattern-matching 
can, though I'm unsure how much of that doubt is well-founded. In other 
words, I tend to the position that lack of pattern matching might be a 
reasonable criticism of Lisp.

 > I don't think libraries and simple macros
> are Greenspunning, and I don't think the ML family is "much" more
> concise than Lisp when you take macros into account.

I have seen some of the things that can be made concise in Haskell, and 
it was outright amazing.
(If Haskell could marshall thunks, I'd be programming in the language 
right now.)

My ability to assess the usefulness of macros is slightly limited. 
However, I'm having trouble with limiting what a macro can do. In 
principle, a macro can cause its parameters to be evaluated once, not at 
all, or an arbitrary number of times; it could do the evaluation at 
expansion time or at evaluation time.
If Lisp were pure, it wouldn't matter when exactly what is evaluated, 
but then Lisp wouldn't need macros anyway :-)
Oh, and with standard macros from the standard libraries, these problems 
usually don't happen - I'd assume that every macro has a clear 
description when it evaluates what parameter, or it's easy to look that 
up. The problems start to arise when people write their own macros. (And 
with standard macros where you just *think* you had understood its 
behavior, and overlooked some special case which comes back to bite you, 
possibly years after you learned to use that macro. Or when different 
vendors use slightly different interpretations of the standard and the 
code starts to break.)

Just an outsider's view, possibly totally irrelevant and off the mark.

Regards,
Jo
From: Dan Bensen
Subject: Re: shootout: implementing an interpreter for a simple procedural       language Minim
Date: 
Message-ID: <f8cqos$nsf$1@wildfire.prairienet.org>
 > Dan Bensen schrieb:
 >> First of all, Marcus, Jon's signature does advertise his book, so
 >> he has directly advertised his book in this thread simply by posting
 >> to it.

Joachim Durchholz wrote:
 > Now you're exaggerating.

A little bit, but it's hard to be both exact and concise.

 >> As for "hyping", it's not necessary to promote the book
 >> directly.  It may be enough to draw programmers toward OCaml by
 >> making comparisons that are unfairly biased against Lisp, which
 >> Jon has been doing repeatedly.

 > I think both sides are guilty of presenting things in a
 > favorable light.

Yes, I agree with you.  The only defense I can offer is that
Lispers usually stay in their own back yard.

 > Oh, and there's another big difference. Most discussions about
 > relative advantages and disadvantages evolve into matter-of-belief
 > statements.  Jon is far more down to facts than that: He's giving
 > benchmarks, making himself open to critique; he's adapting the
 > benchmarks in response to the critique; he's counting lines,
 > characters, or tokens and arguing his choice of criteria rationally.

I'm getting tired of this thread, Joachim, so I'm not going to answer
in depth, but benchmarks don't address RAD, hot-loading code, and other
dynamic issues.  It's not fair to obsess over runtime performance in a
Lisp forum, because Lisp trades runtime optimization for many other
things.  Also, comments like
 >> Thats true, they teaching us Lisp on our university...
 >I really hope they teach you some good languages as well...
are insults, not facts.

 >> Markus, do you consider it "true and useful" to say that pattern-
 >> matching libraries are "Greenspunning"?  Do you think ML languages
 >> are "much more concise" than Lisp?  I don't think those statements
 >> are either true or useful.

 > From what I know, at least the second statement is true.
 > I don't know what "Greenspunning" is, but those pattern-matching
 > libraries that I have seen weren't very well-designed;

I don't know how good they are, but they can be fixed or replaced
if necessary.  Lisp already has excellent iteration and object systems
implemented as libraries.

 > My ability to assess the usefulness of macros is slightly limited.
 > However, I'm having trouble with limiting what a macro can do.
 > In principle, a macro can cause its parameters to be evaluated once,
 > not at all, or an arbitrary number of times; it could do the
 > evaluation at expansion time or at evaluation time.

No, macros are always called at expansion time, and they never evaluate
their arguments.  The only thing that evaluates expressions at eval time
is the code returned by the macro.

 > The problems start to arise when people write their own macros.

That depends on the individual.  Macros are very powerful, and in
experienced hands, they're plenty safe.

 > Just an outsider's view, possibly totally irrelevant and off the mark.

Always welcome. :)

-- 
Dan
www.prairienet.org/~dsb/
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural         language Minim
Date: 
Message-ID: <f8d33g$jmu$1@online.de>
Dan Bensen schrieb:
>  > Oh, and there's another big difference. Most discussions about
>  > relative advantages and disadvantages evolve into matter-of-belief
>  > statements.  Jon is far more down to facts than that: He's giving
>  > benchmarks, making himself open to critique; he's adapting the
>  > benchmarks in response to the critique; he's counting lines,
>  > characters, or tokens and arguing his choice of criteria rationally.
> 
> I'm getting tired of this thread, Joachim, so I'm not going to answer
> in depth, but benchmarks don't address RAD, hot-loading code, and other
> dynamic issues.  It's not fair to obsess over runtime performance in a
> Lisp forum, because Lisp trades runtime optimization for many other
> things.

Fully agreed that there are trade-offs involved, and that picking on 
just performance isn't exactly fair.

I also agree that Lisp was unsurpassed for RAD, hotloading, and a few 
other things, and for quite a long while.
Today, however, I'd say that Lisp isn't the only language in its various 
niches anymore.

>  > My ability to assess the usefulness of macros is slightly limited.
>  > However, I'm having trouble with limiting what a macro can do.
>  > In principle, a macro can cause its parameters to be evaluated once,
>  > not at all, or an arbitrary number of times; it could do the
>  > evaluation at expansion time or at evaluation time.
> 
> No, macros are always called at expansion time, and they never evaluate
> their arguments.  The only thing that evaluates expressions at eval time
> is the code returned by the macro.

Hmm... that sounds definitely un-Lispish to me. Surely you can combine 
macros, or pass parameters that are evaluated by the macro (or when 
calling the macro) to control what the macro does?

Regards,
Jo
From: Larry Clapp
Subject: Re: shootout: implementing an interpreter for a simple procedural         language Minim
Date: 
Message-ID: <slrnfakb8k.46j.larry@theclapp.homelinux.com>
On 2007-07-27, Joachim Durchholz <··@durchholz.org> wrote:
> Dan Bensen schrieb:
>>  > My ability to assess the usefulness of macros is slightly
>>  > limited.  However, I'm having trouble with limiting what a macro
>>  > can do.  In principle, a macro can cause its parameters to be
>>  > evaluated once, not at all, or an arbitrary number of times; it
>>  > could do the evaluation at expansion time or at evaluation time.
>> 
>> No, macros are always called at expansion time, and they never
>> evaluate their arguments.  The only thing that evaluates
>> expressions at eval time is the code returned by the macro.
>
> Hmm... that sounds definitely un-Lispish to me. Surely you can
> combine macros, or pass parameters that are evaluated by the macro
> (or when calling the macro) to control what the macro does?

You guys are talking past each other.  Dan's talking about macros from
a strict technical sense and Joachim is talking not only about that,
but also about the code a macro generates.

  (defmacro example (a b c d)
    `(progn
	,b
	,c
	,c
	(eval (progn ,@d))))

This (generates code that) ignores A, evaluates B once, C twice, and D
at runtime.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural         language Minim
Date: 
Message-ID: <f8dsns$i1c$2@online.de>
Larry Clapp schrieb:
> On 2007-07-27, Joachim Durchholz <··@durchholz.org> wrote:
>> Dan Bensen schrieb:
>>>  > My ability to assess the usefulness of macros is slightly
>>>  > limited.  However, I'm having trouble with limiting what a macro
>>>  > can do.  In principle, a macro can cause its parameters to be
>>>  > evaluated once, not at all, or an arbitrary number of times; it
>>>  > could do the evaluation at expansion time or at evaluation time.
> 
> You guys are talking past each other.  Dan's talking about macros from
> a strict technical sense and Joachim is talking not only about that,
> but also about the code a macro generates.
> 
>   (defmacro example (a b c d)
>     `(progn
> 	,b
> 	,c
> 	,c
> 	(eval (progn ,@d))))
> 
> This (generates code that) ignores A, evaluates B once, C twice, and D
> at runtime.

Ah, thanks.
Then my original assumptions do indeed hold.

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46aa1cba$0$1600$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> Hmm... that sounds definitely un-Lispish to me. Surely you can combine
> macros, or pass parameters that are evaluated by the macro (or when
> calling the macro) to control what the macro does?

A macro is just a term rewriter, like Mathematica, they accept trees and
spit out trees.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-6530B8.18372527072007@news-europe.giganews.com>
In article <························@ptn-nntp-reader02.plus.net>,
 Jon Harrop <···@ffconsultancy.com> wrote:

> Joachim Durchholz wrote:
> > Hmm... that sounds definitely un-Lispish to me. Surely you can combine
> > macros, or pass parameters that are evaluated by the macro (or when
> > calling the macro) to control what the macro does?
> 
> A macro is just a term rewriter, like Mathematica, they accept trees and
> spit out trees.

Paul Graham describes in his book 'On Lisp' the usage
of Common Lisp macros in detail. The book is now quite
expensive, but there is a free PDF you can download:

http://www.paulgraham.com/onlisp.html
http://www.paulgraham.com/onlisptext.html

-- 
http://lispm.dyndns.org
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <Q9qqi.21090$j7.379621@news.indigo.ie>
Joachim Durchholz wrote:
> Hmm... that sounds definitely un-Lispish to me. Surely you can combine
> macros, or pass parameters that are evaluated by the macro (or when
> calling the macro) to control what the macro does?
> 

Well, there's some confusion here.  Of course you can pass parameters
to a macro - it's indeed  up to the macro what the heck it does with the
arguments.  But things in argument position for macros are not
evaluated and then passed to the macro like things in argument position
for functions are before being passed to the function.

-i.e. in (+ (* 2 2) moocow), + the function is going to see 4 and the
value of moocow passed to it.

in (m (* 2 2) moocow), macro m is called (at macroexpansion time) with
the list (* 2 2) and the symbol moocow.

Macros are considered to be "for" transforming code trees, and they
should output valid code trees when used in that capacity for obvious
reasons, but interpretation of their inputs and what the heck they
might actually do are not tightly constrained.
Fun link - Oleg Kiselyov enjoys doing particularly strange things:
http://okmij.org/ftp/Scheme/misc.html#lazy-stream 
"The second article demonstrates that Scheme/CL macro-expansion models
the non-strict computation of Haskell functions."
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <64hcnq6pt1.fsf@hod.lan.m-e-leypold.de>
> Dan Bensen schrieb:
>> Markus E Leypold wrote:
>>  > Actually I don't see:
>>  >  1. Jon didn't hype his book in this thread.
>> First of all, Marcus, Jon's signature does advertise his book, so
>> he has directly advertised his book in this thread simply by posting
>> to it.
>
> Now you're exaggerating.
> I actually had to look at Jon's last post to even notice that he had a
> URL to his book in the sig.

Me too. What indeed was the reason for my reply.

> I think both sides are guilty of presenting things in a favorable light.

That's certainly true. But, of course, that (alone) doesn't make people trolls
or spammers.

> In fact this is a long-standing gripe that I have been having about
> the Lisp community. Disadvantages are discussed far more openly in
> other communities (e.g. the Haskell community is quite open about the
> advantages and disadvantages of laziness).

That is exactly the impression I've been forming recently. Of course
I'm not often around at c.l.l. or other Lispish places, so I won't
know. 


<... and the rest ...>

All very true. :-)

Regards -- Markus (who's been posting too much today).
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a9ee3d$0$1627$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> In fact this is a long-standing gripe that I have been having about the
> Lisp community. Disadvantages are discussed far more openly in other
> communities (e.g. the Haskell community is quite open about the
> advantages and disadvantages of laziness).

Indeed, and this is precisely what concerns me. The Lisp community have
evolved a variety of stock responses designed to make Lisp look good that
are flawed in subtle ways. It takes quite some effort to figure out why
they are wrong and I think it is a real shame that so many people new to
functional programming end up wasting their time with Lisp when they could
be so much more productive using one of the modern FPLs.

This is exactly the same situation that scientific creationists have wiggled
into, particularly in the US. They say "evolution is a myth" and cite an
enormous number of pseudo-scientific reasons trying to undermine the theory
of evolution. That is actually a more interesting debate because the
responses of the scientific creationists have actually evolved over time.
Anyway, I digress.

First, you'll notice that Lispers only ever compare Lisp to Java. Comparing
to OCaml, Haskell or even C# is strictly prohibited on c.l.l. My ray tracer
elucidates why:

  http://www.ffconsultancy.com/languages/ray_tracer/results.html

Java is the only mainstream language that is as slow and verbose as Lisp.

When arguing with C programmers they used to quote Greenspun's Tenth Rule:

  "Any sufficiently complicated C program contains an ad-hoc, ill-
specified, slow, bug-ridden reimplementation of half of Common Lisp."

particularly with regard to the reimplementation of the list data structure
in C programs. Yet they disregard pattern matching (a core feature of all
modern FPLs) as something that they can trivially lash together (ad-hoc)
without saying exactly what it will do (ill-specified), disregarding the
overwhelming quantitative evidence of bad performance (slow) or having
studied the static verification of patterns (bug-ridden) or even caring
that it is not usefully encompassing (only half of ML).

This isn't so bad. But when you consider the depth and breadth of the
misinformation spread by the Lisp community it is really quite disturbing.
I think it is time to bury some of these myths.

A Lisper once wrote a paper on Lisp, comparing it only to Java:

  http://p-cos.net/documents/dynatype.pdf

The author can say "Lisp is better than Java", which is fine. But he can't
say:

  "However, statically typed languages require programmers to deal with all
methods of an interface at once..." -- page 3.

Just a simple mistake I thought. So I wrote to him, clarifying the mistake,
giving a counter example written in OCaml and explaining that inference
solved that problem many years ago. I expected the paper to be altered but,
instead, it was published and I received the response:

  "Thanks for your interest in improving that paper, but at the moment I  
have no intention to make any changes to it."

Take from that what you will.

> From what I know, at least the second statement is true.
> I don't know what "Greenspunning" is, but those pattern-matching
> libraries that I have seen weren't very well-designed; also, I doubt
> that a library can reap all the benefits that built-in pattern-matching
> can, though I'm unsure how much of that doubt is well-founded.

We've been gathering evidence for some time and it all agrees with your
assertion. Lisp can't support pattern matching in the generically-useful
way that SML, OCaml, Haskell and F# already do and pattern matchers written
in Lisp remain slower.

> My ability to assess the usefulness of macros is slightly limited.
> However, I'm having trouble with limiting what a macro can do. In
> principle, a macro can cause its parameters to be evaluated once, not at
> all, or an arbitrary number of times; it could do the evaluation at
> expansion time or at evaluation time.

A macro is simply a very rudimentary form of term rewriter and they are of
limited utility in any language. OCaml's camlp4 macros handle arbitrary
lazy streams, designed to be token streams, and provide an extensible form
of pattern matching. In Lisp, macros are restricted to handling only
s-exprs.

Doing any significant term rewriting without a pattern matcher is seriously
tedious.

Here is another example I recently stumbled upon. Take this code from
Maxima:

(defun bessel-k-simp (exp ignored z)
  (declare (ignore ignored))
  (let ((order (simpcheck (cadr exp) z))
        (rat-order nil))
    (let* ((arg (simpcheck (caddr exp) z)))
      (cond ((and (>= (signum1 order) 0) (bessel-numerical-eval-p order
arg))
             ;; A&S 9.6.6
             ;; K[-v](x) = K[v](x)
             (bessel-k (abs (float order)) (complex ($realpart arg) ($imagpart
arg))))
            ((mminusp order)
             ;; A&S 9.6.6
             ;; K[-v](x) = K[v](x)
             (resimplify (list '(%bessel_k) `((mtimes) -1 ,order) arg)))
            ((and $besselexpand
                  (setq rat-order (max-numeric-ratio-p order 2)))
             ;; When order is a fraction with a denominator of 2, we
             ;; can express the result in terms of elementary
             ;; functions.
             ;;
             ;; K[1/2](z) = sqrt(2/%pi/z)*exp(-z) = K[1/2](z)
             (bessel-k-half-order rat-order arg))
            (t
             (eqtest (list '(%bessel_k) order arg)
                     exp))))))

and translate it into any language with pattern matching. Note that the
pattern matches are actually written in the Lisp as comments but the
authors never bothered to pull in a pattern matching library or Greenspun
one themselves.

There is 84kLOC of such code in Maxima. How much shorter and faster would it
be if it were represented in terms of pattern matching? As the Lispers
always say, it is theoretically possible to do a good job but...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87vec5afh2.fsf@geddis.org>
Jon Harrop <···@ffconsultancy.com> wrote on Fri, 27 Jul 2007:
> I think it is a real shame that so many people new to functional
> programming end up wasting their time with Lisp when they could be so much
> more productive using one of the modern FPLs.

But Common Lisp is not intended to be a "Functional Programming Language".
If that's the primary interest of a new programmer, and they settle on
Common Lisp, perhaps they've simply chosen the wrong community.

Common Lisp _is_ intended to be a tool for general programming, to write any
kind of algorithm.  If a new programmer has that goal, and instead narrowly
focuses only on "Functional Programming Languages", they have probably made
a similar mistake.

> A macro is simply a very rudimentary form of term rewriter and they are of
> limited utility in any language.

This is the kind of rude and unsupported statement that you continue to make,
despite the numerous attempts of Lisp folks to educate you on how macros are
actually used (and greatly valued) in Lisp.  Yet you persist, refusing (and
not even interested in) being educated.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
--------- if you cut here, you'll probably destroy your monitor ----------
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46aa3e29$0$1626$ed2619ec@ptn-nntp-reader02.plus.net>
Don Geddis wrote:
>> A macro is simply a very rudimentary form of term rewriter and they are
>> of limited utility in any language.
> 
> This is the kind of rude and unsupported statement...

Macros are a long way from a dedicated rewriter like a theorem prover or
computer algebra package.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185569531.430691.44980@d30g2000prg.googlegroups.com>
On Jul 27, 11:40 am, Jon Harrop <····@ffconsultancy.com> wrote:
> Don Geddis wrote:
> >> A macro is simply a very rudimentary form of term rewriter and they are
> >> of limited utility in any language.
>
> > This is the kind of rude and unsupported statement...
>
> Macros are a long way from a dedicated rewriter like a theorem prover or
> computer algebra package.

Macros are also a long way from a banana.

Harrop may believe that macros "a very rudimentary form of term
rewriter", but he's wrong.  Macros are a very powerful form of CODE
rewriter.  They can easily do things that would be difficult with a
theorem prover, algebra package, or banana.

Similarly, one can do things with a banana that would be difficult to
do with a macro.

Harrop has a bad case of "when all you have is a hammer, everything
looks like a nail".

Harrop is a blub.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46aa7caa$0$1610$ed2619ec@ptn-nntp-reader02.plus.net>
Andy Freeman wrote:
> Harrop may believe that macros "a very rudimentary form of term
> rewriter", but he's wrong.

Then you'll be able to translate this Mathematica term rewriter into a
similarly-elegant Lisp macro. This finds all sublists that are flanked by
the same element:

  ReplaceList[{a, b, c, a, d, b, d}, {___, x_, y__, x_, ___} -> g[x, {y}]]
  {g[a, {b, c}], g[b, {c, a, d}], g[d, {b}]}

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185584068.609230.39490@i38g2000prf.googlegroups.com>
On Jul 27, 4:06 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> Andy Freeman wrote:
> > Harrop may believe that macros "a very rudimentary form of term
> > rewriter", but he's wrong.
>
> Then you'll be able to translate this Mathematica term rewriter into a
> similarly-elegant Lisp macro.

Harrop "forgot" to quote the very next sentence, where I explained
what lisp macros are.  I'll do so.

>>Harrop may believe that macros "a very rudimentary form of term
>>rewriter", but he's wrong.  Macros are a very powerful form of CODE
>>rewriter.

If Harrop believes that code and terms are the same thing, that's just
one more thing that he's wrong about.

Interestingly enough, the next two sentence answer the argument Harrop
is trying to make.

>> ...      They can easily do things that would be difficult with a
>>theorem prover, algebra package, or banana.
>>
>>Similarly, one can do things with a banana that would be difficult to
>>do with a macro.

In other words, theorem provers are good at different things than code
rewriters.  Too bad Harrop's tool is only good at one thing.

> This finds all sublists that are flanked by the same element:

Why would one need, or even use, a code rewriter for that
functionality?  Heck, why does one even need a term rewriter?  The
following simple python suffices.  (Note that it can be used in more
circumstances than Harrop's obscurity and can be turned into a
generator.)

class Matched:
    def __init__(self):
        self._accumulators = {}
        self.completed = {}

    def addSeq(self, seq):
        for e in seq:
            for k in self._accumulators:
                self._accumulators[k].append(e)
            if e in self._accumulators:
                # p is useful for an generator version
                p = self._accumulators[e][:-1]
                self.completed.setdefault(e, []).append(p)
                # yield p
            self._accumulators[e] = []
From: Kent M Pitman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <u3az96qmy.fsf@nhplace.com>
Andy Freeman <······@earthlink.net> writes:

> On Jul 27, 4:06 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> > Andy Freeman wrote:
> > > Harrop may believe that macros "a very rudimentary form of term
> > > rewriter", but he's wrong.
> >
> > Then you'll be able to translate this Mathematica term rewriter into a
> > similarly-elegant Lisp macro.
> 
> Harrop "forgot" to quote the very next sentence, where I explained
> what lisp macros are.

Indeed.  In Lisp, the notion of a "macro" is just "any program that
takes forms as input and yields forms as output" ... BUT applied at
compile time to forms that are presented for semantic processing
(evaluation or compilation).  That is, a function such as:

(defun shrug (list)
  (loop for (x . sublist-and-more) on list
        for more = (member x sublist-and-more)
        when more
         collect `(g ,x ,(ldiff sublist-and-more more))))
=> SHRUG

(shrug '(a b c a d b d))
=> ((G A (B C)) (G B (C A D)) (G D (B)))

For this to be a macro, you'd want to have done something to get it called
at semantic processing time, as in:

(defmacro shrug-too (&rest list)
  `',(shrug list))
=> SHRUG-TOO

(shrug-too a b c a d b d)
=> ((G A (B C)) (G B (C A D)) (G D (B)))

Of course, I didn't decide the boring nature of this "macro".
Spicing it up slightly...

(defun hohum (list &key (op 'op) (transform #'identity))
  (loop for (x . sublist-and-more) on list
        for more = (member x sublist-and-more)
        when more
          collect `(,op ,(funcall transform x)
                        ,(funcall transform (ldiff sublist-and-more more)))))
=> HOHUM

(hohum '(a b c a d b d) :op 'g)
=> ((G A (B C)) (G B (C A D)) (G D (B)))

(defmacro hohum-too (&rest forms)
  (flet ((quotify (x) `',x))
   `(flet ((show (x y) (print (list y 'between x))))
      ,@(hohum forms :op 'show :transform #'quotify)
      ',forms)))
=> HOHUM-TOO

(hohum-too a b c a d b d x e d)

((B C) BETWEEN A) 
((C A D) BETWEEN B) 
((B) BETWEEN D) 
((X E) BETWEEN D) 
=> (A B C A D B D X E D)

(pprint (macroexpand-1 '(hohum-too a b c a d b d x e d)))

(FLET ((SHOW (X Y) (PRINT (LIST Y 'BETWEEN X))))
  (SHOW 'A '(B C))
  (SHOW 'B '(C A D))
  (SHOW 'D '(B))
  (SHOW 'D '(X E))
  '(A B C A D B D X E D))

In general, Lisp programs would not tend to do this kind of thing at macro
expansion time, since that limits the utility of the operation to patterns
to be rewritten that are known at compile time (and hence are not application
data).  We call things that operate on data at runtime "functions", as Andy
correctly observes.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ab13ca$0$1633$ed2619ec@ptn-nntp-reader02.plus.net>
Andy Freeman wrote:
> > ReplaceList[{a, b, c, a, d, b, d}, {___, x_, y__, x_, ___} -> g[x, {y}]]
>
> class Matched:
>     def __init__(self):
>         self._accumulators = {}
>         self.completed = {}
> 
>     def addSeq(self, seq):
>         for e in seq:
>             for k in self._accumulators:
>                 self._accumulators[k].append(e)
>             if e in self._accumulators:
>                 # p is useful for an generator version
>                 p = self._accumulators[e][:-1]
>                 self.completed.setdefault(e, []).append(p)
>                 # yield p
>             self._accumulators[e] = []

As you can see, the dedicated term rewriter is better at rewriting terms.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <878x908qgu.fsf@geddis.org>
Jon Harrop <···@ffconsultancy.com> wrote on Sat, 28 Jul 2007:
> As you can see, the dedicated term rewriter is better at rewriting terms.

Which surprises no one.  Similarly, a dedicated graphics library will be
better for implementing graphics programs.

Some of us (but not you, apparently) are aware that there is more to
computation than just term rewriting.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
If a kid asks where rain comes from, I think a cute thing to tell him is "God
is crying."  And if he asks why God is crying, another cute thing to tell him
is "Probably because of something you did."  -- Deep Thoughts, by Jack Handey
From: Kent M Pitman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <uk5skh40v.fsf@nhplace.com>
Don Geddis <···@geddis.org> writes:

> Some of us ... are aware that there is more to computation than just
> term rewriting.

Which is why CL focuses on providing tools that allow people to modify
what they get out-of-the-box in order to bootstrap an environment they
like to program in.

If the poster in question had spent half as much time learning the
language and implementing tools, instead of simply criticizing the
language groundlessly, he'd by now have had plenty of time to
implement a very nice term rewriting library of his own ... and could
be offering it for sale to the vast numbers of people who have been
holding off in their use of Lisp until that important linguistic gap
was filled.  (I'd rush to do it myself, but I'd hate to rob someone of
such a hard-earned ticket to fame and fortune.)
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ab8791$0$1595$ed2619ec@ptn-nntp-reader02.plus.net>
Kent M Pitman wrote:
> If the poster in question had spent half as much time learning the
> language and implementing tools, instead of simply criticizing the
> language groundlessly, he'd by now have had plenty of time to
> implement a very nice term rewriting library of his own ... and could
> be offering it for sale to the vast numbers of people who have been
> holding off in their use of Lisp until that important linguistic gap
> was filled.  (I'd rush to do it myself, but I'd hate to rob someone of
> such a hard-earned ticket to fame and fortune.)

I did this two years ago in OCaml. Took four days to write, sold in under a
week.

Richard Fateman did this about 100 years ago in Lisp. Took him many years to
write. He got sued.

Disclaimer: I don't think Richard was actually sued for choosing to use
Lisp.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185812958.261963.233530@j4g2000prf.googlegroups.com>
On Jul 28, 2:51 am, Jon Harrop <····@ffconsultancy.com> wrote:
> As you can see, the dedicated term rewriter is better at rewriting terms.

Was it?  It's not obvious that the term rewriter solution was correct,
how it works, or how to make it work in other situations.  For
example, the list elements may have special properties that an
ordinary implementation can exploit by replacing the hash table with a
specialized data structure.  Or, maybe the sublists have interesting
properties that can be exploited by replacing the accumulator lists.
The term-rewriter, no such luck.
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5guaa9F3e3jopU1@mid.individual.net>
Jon Harrop wrote:

> A Lisper once wrote a paper on Lisp, comparing it only to Java:
> 
>   http://p-cos.net/documents/dynatype.pdf

...which would be me.

> The author can say "Lisp is better than Java", which is fine.

I don't say that in that paper. More specifically, I am not advocating 
Lisp or Scheme in that paper at all.

> But he can't say:
> 
>   "However, statically typed languages require programmers to deal with all
> methods of an interface at once..." -- page 3.
> 
> Just a simple mistake I thought. So I wrote to him, clarifying the mistake,
> giving a counter example written in OCaml and explaining that inference
> solved that problem many years ago.

Quoting that sentence out of context is a misrepresentation of the 
paper. I state the following very clearly in the introduction:

"This paper mainly focuses on shortcomings in Java (again, because of 
the workshop theme) which is only one example of a statically typed 
programming language. There are others that may have different and 
actually better solutions by default. But I hope this serves as a good 
starting point for a more objective analysis of an otherwise usually 
very heatedly discussed topic."

> I expected the paper to be altered but,
> instead, it was published and I received the response:
> 
>   "Thanks for your interest in improving that paper, but at the moment I  
> have no intention to make any changes to it."
> 
> Take from that what you will.

The main reason is lack of time. Since the paper is very clearly 
presented as experimental work, and is in no way misleading about its 
actual merits, I see no reason to change or withdraw it. It has indeed a 
provocative tone, but that's because the workshop to which I submitted 
it asked for provocative position statements.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8gdus$175$1@aioe.org>
Jon Harrop escreveu:
> Joachim Durchholz wrote:
>> In fact this is a long-standing gripe that I have been having about the
>> Lisp community. Disadvantages are discussed far more openly in other
>> communities (e.g. the Haskell community is quite open about the
>> advantages and disadvantages of laziness).
> 
> Indeed, and this is precisely what concerns me. The Lisp community have
> evolved a variety of stock responses designed to make Lisp look good that
> are flawed in subtle ways. It takes quite some effort to figure out why
Jon, you seem to be building quite quickly an assorted bag of stock
responses as well. . .

> they are wrong and I think it is a real shame that so many people new to
> functional programming end up wasting their time with Lisp when they could
> be so much more productive using one of the modern FPLs.

and this is the point others are trying to explain you: discussing the
advantages of a designed for FPL  for _functional_ _programming_ in
c.l.l is not a point, given Lisp is a general purpose language.

> 
> This is exactly the same situation that scientific creationists have wiggled
> into, particularly in the US. They say "evolution is a myth" and cite an
> enormous number of pseudo-scientific reasons trying to undermine the theory
> of evolution. That is actually a more interesting debate because the
> responses of the scientific creationists has actually evolved over time.
> Anyway, I digress.

No. This is worse than a digression, this is another fallacy called
"ignoratio elenchi".

> 
> First, you'll notice that Lispers only ever compare Lisp to Java. Comparing
> to OCaml, Haskell or even C# is strictly prohibited on c.l.l. My ray tracer
> elucidates why:
> 
>   http://www.ffconsultancy.com/languages/ray_tracer/results.html
> 

Again, another fallacy: Lisp is more than a language to implement a Ray
Tracer.

> Java is the only mainstream language that is as slow and verbose as Lisp.

Again a loaded argument. Java and Lisp are touted as general purpose
languages.

> 
> When arguing with C programmers they used to quote Greenspun's Tenth Rule:
> 
>   "Any sufficiently complicated C program contains an ad-hoc, ill-
> specified, slow, bug-ridden reimplementation of half of Common Lisp."
> 
> particularly with regard to the reimplementation of the list data structure
> in C programs. Yet they disregard pattern matching (a core feature of all
> modern FPLs) as something that they can trivially lash together (ad-hoc)
> without saying exactly what it will do (ill-specified), disregarding the
> overwhelming quantitative evidence of bad performance (slow) or having
> studied the static verification of patterns (bug-ridden) or even caring
> that it is not usefully encompassing (only half of ML).

Another very fallacious paragraph, filled with a lot of emotional
content and very little essence. . .


Let's for a moment just see this affirmation: "pattern matching (a core
feature of allmodern FPLs)"

Do you understand that production programms, instead of your toy
benchmark problems posed for the amusement of lispers, do not expend
100% of their time _just_ doing pattern matching, and so Lisp has an
approach where its performance is acceptable?

Do you understand that in the case a system would require a even better
performance (be it on parttern matching or whatever) Lisp has a FFI to
cope with that?
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8gh5o$he0$1@online.de>
Cesar Rabak schrieb:
> Let's for a moment just see this affirmation: "pattern matching (a core
> feature of allmodern FPLs)"
> 
> Do you understand that production programms, instead of your toy
> benchmark problems posed for the amusement of lispers, do not expend
> 100% of their time _just_ doing pattern matching, and so Lisp has an
> approach where its performance is acceptable?

Pattern matching isn't just a tool for efficiency.

It's also a tool for quickly and safely deconstructing tagged unions of 
structures, something that's quite common in any language (including 
Lisp, C, and anything else that I have seen). I think it's roughly at 
the same abstraction and usefulness level as arrays - not a silver 
bullet, not quite an abstract data type, but still a very useful and 
trusty tool that every craftsman should have in his toolset.

I'd say that the second use far outweighs the first use. In Lisp, it 
would be something of a design pattern, supported by some library code, 
possibly with the help of some compiler or macro magic to get things 
really fast - but that's secondary, pattern matching is really more 
about programming style.

Regards,
Jo
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <vc4pjm486k.fsf@hod.lan.m-e-leypold.de>
> Let's for a moment just see this affirmation: "pattern matching (a core
> feature of allmodern FPLs)"
>
> Do you understand that production programms, instead of your toy
> benchmark problems posed for the amusement of lispers, do not expend
> 100% of their time _just_ doing pattern matching, and so Lisp has an
> approach where its performance is acceptable?

But you (Cesar) also understand that there is more to having pattern
matching in the language itself than just only performance?

Regards -- Markus
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8lkim$ht3$1@aioe.org>
Markus E.L. escreveu:
>> Let's for a moment just see this affirmation: "pattern matching (a core
>> feature of allmodern FPLs)"
>>
>> Do you understand that production programms, instead of your toy
>> benchmark problems posed for the amusement of lispers, do not expend
>> 100% of their time _just_ doing pattern matching, and so Lisp has an
>> approach where its performance is acceptable?
> 
> But you (Cesar) also understand that there is more to having pattern
> matching in the language itself than just only performance?
> 
I undertand the sentence but don't see any intelligence conveyed in it, 
so let me test my understanding: you agree that pattern matching is not 
a single feature that can make a "core feature" of any language of all?

> Regards -- Markus
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xr6mpqxja.fsf@ruckus.brouhaha.com>
Cesar Rabak <·······@yahoo.com.br> writes:
> I undertand the sentence but don't see any intelligence conveyed in
> it, so let me test my understanding: you agree that pattern matching
> is not a single feature that can make a "core feature" of any language
> of all?

That sentence is not even grammatical, and I can't tell what it means.
But pattern matching is a core feature of Haskell and it's the only
way that code gets executed.  The only way to force an evaluation is
to pattern-match the expression against something.
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <gq1wep5v7z.fsf@hod.lan.m-e-leypold.de>
> Markus E.L. escreveu:
>>> Let's for a moment just see this affirmation: "pattern matching (a core
>>> feature of allmodern FPLs)"
>>>
>>> Do you understand that production programms, instead of your toy
>>> benchmark problems posed for the amusement of lispers, do not expend
>>> 100% of their time _just_ doing pattern matching, and so Lisp has an
>>> approach where its performance is acceptable?
>> But you (Cesar) also understand that there is more to having pattern
>> matching in the language itself than just only performance?
>>
> I undertand the sentence but don't see any intelligence conveyed in
> it, so let me test my understanding: you agree that pattern matching
> is not a single feature that can make a "core feature" of any language
> of all?
^^^^^ Sorry, around here I have a parse error, so I cannot answer to
your question directly. But I can try to clarify my question. What I
said (or intended to say) was:

- You tried (as I understand) to deemphasize the pattern matching by
  pointing out that real programs don't do pattern matching all of
  their time (a statement which in istelf should examined carefully
  since pattern matching replaces most conditional statements in
  languages which have pattern matching in the core language), so not
  having pattern matching in the core language won't slow real
  programs down much.

- You seemed to imply that the reason to have pattern matching in the
  core language is baiscally speed / efficiency (and if that aspect is
  deemphasized, the argument for pattern matching in the core language
  would break down).

- To that I replied that (in form of question) that there are other
  reason beyond speed / efficiency why one would want to have pattern
  matching in the core language (Jons fixation on speed benchmarks
  notwithstanding).

Does that make it clearer?

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46aff28f$0$1630$ed2619ec@ptn-nntp-reader02.plus.net>
Markus E.L. wrote:
> - You tried (as I understand) to deemphasize the pattern matching by
>   pointing out that real programs don't do pattern matching all of
>   their time (a statement which in istelf should examined carefully
>   since pattern matching replaces most conditional statements in
>   languages which have pattern matching in the core language), so not
>   having pattern matching in the core language won't slow real
>   programs down much.

More importantly, pattern matching can replace destructuring, and that is
even more common than dispatch.

> - You seemed to imply that the reason to have pattern matching in the
>   core language is baiscally speed / efficiency (and if that aspect is
>   deemphasized, the argument for pattern matching in the core language
>   would break down).
> 
> - To that I replied that (in form of question) that there are other
>   reason beyond speed / efficiency why one would want to have pattern
>   matching in the core language (Jons fixation on speed benchmarks
>   notwithstanding).

You might like to note that the ray tracer implementations with pattern
matching (OCaml, SML, Haskell, F#) are the shortest and those without are
the longest (Lisp, Java).

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Chris F Clark
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <sdd3az4woy6.fsf@shell01.TheWorld.com>
Cesar Rabak <·······@yahoo.com.br> writes:

> Markus E.L. escreveu:
>>> Let's for a moment just see this affirmation: "pattern matching (a core
>>> feature of allmodern FPLs)"
>>>
>>> Do you understand that production programms, instead of your toy
>>> benchmark problems posed for the amusement of lispers, do not expend
>>> 100% of their time _just_ doing pattern matching, and so Lisp has an
>>> approach where its performance is acceptable?
>> But you (Cesar) also understand that there is more to having pattern
>> matching in the language itself than just only performance?
>>
> I undertand the sentence but don't see any intelligence conveyed in
> it, so let me test my understanding: you agree that pattern matching
> is not a single feature that can make a "core feature" of any language
> of all?

You have mis-read the sentence.  The sentence says that one does
pattern matching for more reasons than just performance.  While you
are reading it as a "less than" proposition.  That is, you are reading
the sentence as saying one wants more than performance out of pattern
matching, thus the performance of pattern matching must not be
important.  However, you are neglecting to see that something else
(leveraging the static type system to assure correctness by compiler
verification) is the more important reason and performance is just an
additional benefit.  Since you have no interest in the correctness
advantages of a static type system, it is not surprising you missed
that benefit--it is not a benefit for you.
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8o51u$uc4$1@aioe.org>
Chris F Clark escreveu:
> Cesar Rabak <·······@yahoo.com.br> writes:
> 
>> Markus E.L. escreveu:
>>>> Let's for a moment just see this affirmation: "pattern matching (a core
>>>> feature of allmodern FPLs)"
>>>>
>>>> Do you understand that production programms, instead of your toy
>>>> benchmark problems posed for the amusement of lispers, do not expend
>>>> 100% of their time _just_ doing pattern matching, and so Lisp has an
>>>> approach where its performance is acceptable?
>>> But you (Cesar) also understand that there is more to having pattern
>>> matching in the language itself than just only performance?
>>>
>> I undertand the sentence but don't see any intelligence conveyed in
>> it, so let me test my understanding: you agree that pattern matching
>> is not a single feature that can make a "core feature" of any language
>> of all?
> 
> You have mis-read the sentence.  The sentence says that one does
> pattern matching for more reasons than just performance.  While you
> are reading it as a "less than" proposition.  That is, you are reading
> the sentence as saying one wants more than performance out of pattern
> matching, thus the performance of pattern matching must not be
> important.  However, you are neglecting to see that something else
> (leveraging the static type system to assure correctness by compiler
> verification) is the more important reason and performance is just an
> additional benefit.  Since you have no interest in the correctness
> advantages of a static type system, it is not surprising you missed
> that benefit--it is not a benefit for you.
OK. We agree on what we disagree!
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <91sl74juq9.fsf@hod.lan.m-e-leypold.de>
> Cesar Rabak <·······@yahoo.com.br> writes:
>
>> Markus E.L. escreveu:
>>>> Let's for a moment just see this affirmation: "pattern matching (a core
>>>> feature of allmodern FPLs)"
>>>>
>>>> Do you understand that production programms, instead of your toy
>>>> benchmark problems posed for the amusement of lispers, do not expend
>>>> 100% of their time _just_ doing pattern matching, and so Lisp has an
>>>> approach where its performance is acceptable?
>>> But you (Cesar) also understand that there is more to having pattern
>>> matching in the language itself than just only performance?
>>>
>> I undertand the sentence but don't see any intelligence conveyed in
>> it, so let me test my understanding: you agree that pattern matching
>> is not a single feature that can make a "core feature" of any language
>> of all?
>
> You have mis-read the sentence.  The sentence says that one does
> pattern matching for more reasons than just performance.  While you
> are reading it as a "less than" proposition.  That is, you are reading
> the sentence as saying one wants more than performance out of pattern
> matching, thus the performance of pattern matching must not be
> important.  However, you are neglecting to see that something else
> (leveraging the static type system to assure correctness by compiler
> verification) is the more important reason and performance is just an
> additional benefit.  Since you have no interest in the correctness
> advantages of a static type system, it is not surprising you missed
> that benefit--it is not a benefit for you.

Yes, that pretty much summarizes the point I wanted to make, but
unfortunately spoils the learning effect by giving the answer to "what
else could be important?" away. :-)

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46adca67$0$1594$ed2619ec@ptn-nntp-reader02.plus.net>
Cesar Rabak wrote:
> Jon, you seem to be building quite quickly an assorted bag of stock
> responses as well. . .

Yes.

> Do you understand that production programms, instead of your toy
> benchmark problems posed for the amusement of lispers, do not expend
> 100% of their time _just_ doing pattern matching,

In OCaml/Haskell/SML/F#, none of the benchmarks spend a significant amount
of time doing pattern matching.

> and so Lisp has an approach where its performance is acceptable?

Can you make a testable hypothesis? For what computationally-intensive tasks
is pattern matching irrelevant and Lisp's performance acceptable?

> Do you understand that in the case a system would require a even better
> performance (be it on parttern matching or whatever) Lisp has a FFI to
> cope with that?

I agree that the FFI can solve Lisp's performance problems...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185836515.235430.95600@z28g2000prd.googlegroups.com>
On Jul 30, 4:15 am, Jon Harrop <····@ffconsultancy.com> wrote:
> In OCaml/Haskell/SML/F#, none of the benchmarks spend a significant amount
> of time doing pattern matching.

Yet, Harrop's F# examples are little more than pattern matching.

Are they spending time in code he doesn't show us?  Does the overhead
around pattern matching dominate?

If pattern matching time isn't important, then being 2-10x slower at
pattern matching isn't likely to be important.  (Say pattern matching
takes 2 min for a program that runs for 100 minutes.  Increasing it
10x changes that to 20 minutes out of 118 minutes, or less than 20%
slower.)

-andy
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8llrf$sda$1@aioe.org>
Jon Harrop escreveu:
> Cesar Rabak wrote:
>> Jon, you seem to be building quite quickly an assorted bag of stock
>> responses as well. . .
> 
> Yes.
> 
>> Do you understand that production programms, instead of your toy
>> benchmark problems posed for the amusement of lispers, do not expend
>> 100% of their time _just_ doing pattern matching,
> 
> In OCaml/Haskell/SML/F#, none of the benchmarks spend a significant amount
> of time doing pattern matching.

You make another very irritating move when cutting the relevant part of 
discussion, and change subject. . . this is not the expected behaviour 
from whom at every other post ask for 'provable and testable this and 
that'...

Your phrase above is non sequitur. You obviously are not interested in 
listening our point: Lisp is a general purpose, multiparadigm, 
programming language. Obviously there are trade offs, which are 
acceptable to the Lisp community.

> 
>> and so Lisp has an approach where its performance is acceptable?
> 
> Can you make a testable hypothesis? For what computationally-intensive tasks
> is pattern matching irrelevant and Lisp's performance acceptable?

I think the burden of proof is with the challenging technology! Hint, 
hint: if you could show a production grade programm written in Lisp 
rewritten to your favorite language to have any business value, I bet 
people will pay for it!

> 
>> Do you understand that in the case a system would require a even better
>> performance (be it on parttern matching or whatever) Lisp has a FFI to
>> cope with that?
> 
> I agree that the FFI can solve Lisp's performance problems...
> 

Great.
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8lorj$m8g$1@registered.motzarella.org>
Cesar Rabak schrieb:
> Jon Harrop escreveu:
>> I agree that the FFI can solve Lisp's performance problems...
>>
> 
> Great.

I think Lisp has no performance problems.
There are compilers that don't produce as fast code as the
OCaml compiler does.
Anyway, for more performance the compiler writers will come up with
new solutions. In the end each compilation of any programming
language will result in the same execution speed - as soon compilers
with near human intelligence will take over.


André
-- 
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46af1d1b$0$1614$ed2619ec@ptn-nntp-reader02.plus.net>
Cesar Rabak wrote:
> I think the burden of proof is with the challenging technology! Hint,
> hint: if you could show a production grade programm written in Lisp
> rewritten to your favorite language to have any business value, I bet
> people will pay for it!

You might like to look at some of the commercial users of other FPLs, like
OCaml.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural       language Minim
Date: 
Message-ID: <5gulkkF3hvv0pU1@mid.individual.net>
Joachim Durchholz wrote:

> In fact this is a long-standing gripe that I have been having about the 
> Lisp community. Disadvantages are discussed far more openly in other 
> communities (e.g. the Haskell community is quite open about the 
> advantages and disadvantages of laziness).

This is incorrect. Off the top of my head, I can think of the following 
issues that have been criticized openly in the Lisp community:

- Advantages and disadvantages (!) of macros
- Lisp-1 vs. Lisp-2
- presence and absence of full or partial continuations
- the multiple meanings of NIL
- iteration vs. recursion
- the CLOS Metaobject Protocol
- absence and presence of more reflective features, especially 
first-class environments
- the role of symbols
- package vs. module systems
- the size of the language: Common Lisp is often considered either too 
large or too small, depending on the discussant
- the fragmentation of the community (both Lisp in general and Common 
Lisp and Scheme specifically)
- the lack of a formal institutionalized process for further 
standardization for Common Lisp

This only captures a subset of typical discussions. For a current 
example of open criticism, you could also look at the discussion 
archives for the R6RS Scheme report.

Some of these issues are actually important.

Purported lack of efficiency and purportedly missing pattern matchers 
are usually not among the discussed issues.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural     language Minim
Date: 
Message-ID: <f8g3nq$dip$1@registered.motzarella.org>
Joachim Durchholz schrieb:
> Now you're exaggerating.
> I actually had to look at Jon's last post to even notice that he had a 
> URL to his book in the sig.

Right, I also think that this is very visible.
One Lisper who published his book in April 2005 made also lot's of
postings, of which some were very explicit, and he wasn't considered
a spammer. His book actually is very good (it is a Lisp book).


> Oh, and there's another big difference. Most discussions about relative 
> advantages and disadvantages evolve into matter-of-belief statements. 
> Jon is far more down to facts than that: He's giving benchmarks, making 
> himself open to critique; he's adapting the benchmarks in response to 
> the critique; he's counting lines, characters, or tokens and arguing his 
> choice of criteria rationally.

I don't see Jon as a spammer or troll.
But on this point you are not completely correct.
He himself is caught at least sometimes in these beliefs.
I explained to him like eleven times (also in private mails) that this
counting lets Lisp in many cases of trivial programs appear in a bad
light, and that some other mechanism could make more sense.
For example counting "tokens" or however we want to call it.
If we do that then suddenly Lisp code is not much longer than an OCaml
or Haskell equivalent, but suddenly in the same league.

For me the most important quote that was ever made about Lisp:
"Lisp isn't a language, it's a building material."
(Dr. Alan Kay)

Lisp is a more general programming language in some sense than most of
the others that exist. It is more a building material out of which you
construct very nice solutions to problems. It only does not always work
out for simple code. When I do simple code and introduce a macro its
definition lengthens the code file. That it would be used 48 times in
a "real project" was pointed out to Jon. Each use of it reduces the
token count and therewith code complexity, for example in opposite to
his code. But well, although I explained it (and others explained
different things as too) he continues to not implement these new facts
into his arguments.

About speed:
OCaml was specifically designed to make very runtime efficient programs.
That came at some costs, check: http://www.podval.org/~sds/ocaml-sucks.html
Anyway, Lisp can in fact compete with OCaml, although Jon does not
admit it. His idea that OCaml usually runs 2-10 times faster is not
always so true as he believes. In an email I said:

when I look at
http://www.ffconsultancy.com/free/ray_tracer/images/perf_all_amd64.gif
I see...
...for white color:
   ocamlopt is ca at 0.86 speedwise
   the fastest sbcl at 0.61 speedwise
So the Lisp code runs 29,06% slower.

...for red color:
   ocamlopt is at 0.6
   sbcl is at 0.43
Here too Lisp runs with 0,43/0,6 = 71,6% of OCamls speed, making it
therewith 28,3% slower.

On 64-Bit system a Lisp (language family) system, Stalin, outperformed
his OCaml code.
And yes: these Lisp programs have a higher LOC. This however is not
very meaningful. In bigger programs, where some techniques are reused
the macros (that brought speed optimizations) would also reduce the
code complexity. OCaml is simply too optimized to beat it in its domain.
Short programs will in most cases be faster in OCaml (compared to Lisp
or Haskell). If Lisp wants to compete it needs some extra techniques.
If the program itself is short code reuse can't begin to help, so the
line count gets worse. Although not always the token count has to be
worse.

Others pointed out that his benchmark mostly measured the Garbage
Collectors. The one in OCaml is better designed as the one of the
Lisps implementation that went into the comparison, at least for this
task.
But pointing out things does not always work, as Jon sadly continues
to state things although he was corrected.
This does not make him a troll. But he surely is doing sometimes, what
you criticized: living in a world of matter-of-belief statements.


> I don't know what "Greenspunning" is, but those pattern-matching 
> libraries that I have seen weren't very well-designed;

Which were the ones you have seen?


> I have seen some of the things that can be made concise in Haskell, and 
> it was outright amazing.

I totally agree. When Haskell is working in its domain it is more or
less impossible for other languages to compete.


> My ability to assess the usefulness of macros is slightly limited.

Increase your knowledge about Macros.
Read the first 9 chapters of this book:
http://www.gigamonkeys.com/book/
And your understanding will improve. You can do it in two days.


> However, I'm having trouble with limiting what a macro can do. In 
> principle, a macro can cause its parameters to be evaluated once, not at 
> all, or an arbitrary number of times; it could do the evaluation at 
> expansion time or at evaluation time.

A macro is more or less a compiler. It is doing source code transformation.
It is a function: it takes Lisp code and produces Lisp code.
The resulting code gets compiled. When a Lisp program is running there
are no macros anymore.. in some sense, because a Lisp program is always
running.


> If Lisp were pure, it wouldn't matter when exactly what is evaluated, 
> but then Lisp wouldn't need macros anyway :-)

I don't think this is true.
But: Lisp does not need macros to be turing complete.
It needs them to be able to do all what Haskell can do and additionally
create a well integrated syntax for domain specific languages.
Also it can do speed optimizations.


> Oh, and with standard macros from the standard libraries, these problems 
> usually don't happen - I'd assume that every macro has a clear 
> description when it evaluates what parameter, or it's easy to look that 
> up. The problems start to arise when people write their own macros. (And 
> with standard macros where you just *think* you had understood its 
> behavior, and overlooked some special case which comes back to bite you, 
> possibly years after you learned to use that macro. Or when different 
> vendors use slightly different interpretations of the standard and the 
> code starts to break.)
> 
> Just an outsider's view, possibly totally irrelevant and off the mark.

Well, you are coming closer. But macros are still not completely
dis-mystified for you. So your views on them might not always be how they
really work.


André
-- 
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural       language Minim
Date: 
Message-ID: <f8gica$ilg$1@online.de>
André Thieme schrieb:
> Joachim Durchholz schrieb:
>> Oh, and there's another big difference. Most discussions about 
>> relative advantages and disadvantages evolve into matter-of-belief 
>> statements. Jon is far more down to facts than that: He's giving 
>> benchmarks, making himself open to critique; he's adapting the 
>> benchmarks in response to the critique; he's counting lines, 
>> characters, or tokens and arguing his choice of criteria rationally.
> 
> I don't see Jon as a spammer or troll.
> But on this point you are not completely correct.
> He himself is caught at least sometimes in these beliefs.

Possibly.
Nobody is perfect.
I actually found him guilty of launching quite nasty ad hominem attacks 
today. "Spammer" is still wrong (that's plain ridiculous), but there's a 
strong streak of trolldom in his dealing in c.l.l. It's probably more a 
temper grown out of hand rather than trolldom, so he's not a troll by 
intention, but his behaviour seems troll-like enough to make people foam 
at the mouth.

Well, enough analysis.

> Lisp is a more general programming language in some sense than most of
> the others that exist. It is more a building material out of which you
> construct very nice solutions to problems. It only does not always work
> out for simple code. When I do simple code and introduce a macro its
> definition lengthens the code file. That it would be used 48 times in
> a "real project" was pointed out to Jon. Each use of it reduces the
> token count and therewith code complexity, for example in opposit to
> his code. But well, altough I explained it (and others explained
> different things as too) he continues to not implement these new facts
> into his arguments.

The problem with macros is that they do not do information hiding.

That's why I said that Lisp is more a programming language laboratory 
rather than a language suited for production programming. (That doesn't 
make it entirely unsuitable for the task, of course, and you can whip up 
pretty amazing things with it anyway. I know that this statement is too 
strong to stand up.)

> On 64-Bit system a Lisp (language family) system, Stalin, outperformed
> his OCaml code.

Stuff like Stalin works only if you drop major parts of what makes Lisp 
strong, of course. I don't think you can do much run-time loading in a 
Stalin-compiled Lisp program, can you? (Even if you could, the linkage 
would not have the optimization of Stalin.)

I.e. the problem with Lisp (as with most dynamically-loading languages) 
is that a lot of optimizations need to be done via whole-system analysis.

>> I don't know what "Greenspunning" is, but those pattern-matching 
>> libraries that I have seen weren't very well-designed;
> 
> Which were the ones you have seen?

Sorry - it's been several months, and I don't remember the details 
anymore. This topic was discussed at that time, and I was referred to 
some libraries. A few looks at the code... well, underwhelmed me.

Probably because nobody in the Lisp community cared enough to make these 
libraries really good. Which may be blindness on the Lisp side, or 
simple lack of need - difficult to tell from the outside ;-)

>> My ability to assess the usefulness of macros is slightly limited.
> 
> Increase your knowlegde about Macros.
> Read the first 9 chapters of this book:
> http://www.gigamonkeys.com/book/
> And your understanding will improve. You can do it in two days.

OK, that seems a reasonable proposal.

Once I find the time though - there are two other papers sitting on my 
reading list.

>> However, I'm having trouble with limiting what a macro can do. In 
>> principle, a macro can cause its parameters to be evaluated once, not 
>> at all, or an arbitrary number of times; it could do the evaluation at 
>> expansion time or at evaluation time.
> 
> A macro is more or less a compiler. It is doing source code transformation.
> It is a function: it takes Lisp code and produces Lisp code.
> The resulting code gets compiled. When a Lisp program is running there
> are no macros anymore.. in some sense, because a Lisp program is always
> running.

So the ability to break abstraction barriers, and no guarantees about 
when or how often the arguments will be evaluated.

>> If Lisp were pure, it wouldn't matter when exactly what is evaluated, 
>> but then Lisp wouldn't need macros anyway :-)
> 
> I don't think this is true.
> But: Lisp does not need macros to be turing complete.

No, of course not, but that's beside the point.

> It needs them to be able to do all what Haskell can do and additionally
> create a well integrated syntax for domain specific languages.

Haskell does all of this without macros.

> Also it can do speed optimizations.

Yes, there's no simple way to optimize Haskell programs.
(Haskellers tend to say that simple optimizations should be done by the 
compiler anyway, that complex optimizations can't be done without 
profiling and serious effort, and that it's still possible to get things 
fast in Haskell. I'm undecided how much water this argument will hold, 
but it's not entirely unreasonable - Haskell compilers do some very 
advanced kinds of optimization, including code transformations that 
eliminate the construction and destruction of entire data structures.)

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ac6fa1$0$1620$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
>> On 64-Bit system a Lisp (language family) system, Stalin, outperformed
>> his OCaml code.
> 
> Stuff like Stalin works only if you drop major parts of what makes Lisp
> strong, of course.

Exactly. You might as well pretend that SML is Lisp and cite MLton's
performance...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185813707.242217.193210@z28g2000prd.googlegroups.com>
On Jul 28, 4:10 pm, Joachim Durchholz <····@durchholz.org> wrote:
> The problem with macros is that they do not do information hiding.

Huh?

> >> However, I'm having trouble with limiting what a macro can do. In
> >> principle, a macro can cause its parameters to be evaluated once, not
> >> at all, or an arbitrary number of times; it could do the evaluation at
> >> expansion time or at evaluation time.

Or it can cause its "parameters" to be evaluated in a different
context, or it can treat some of them as variable declarations.

Many programming languages have constructs that are "redundant" in the
sense that their semantics is expressable in the language, but are
neverthless quite useful because doing these constructs are far more
usable than "the roll your own" versions.  Some of these constructs
can be provided by standard functions and/or classes.  Others require
syntax.

Macros let lispers define syntax.

> > A macro is more or less a compiler. It is doing source code transformation.
> > It is a function: it takes Lisp code and produces Lisp code.
> > The resulting code gets compiled. When a Lisp program is running there
> > are no macros anymore.. in some sense, because a Lisp program is always
> > running.
>
> So the ability to break abstraction barriers, and no guarantees about
> when or how often the arguments will be evaluated.

Or, the ability to create abstraction barriers.  (Consider python's
with statement, added in 2.6.)

> > It needs them to be able to do all what Haskell can do and additionally
> > create a well integrated syntax for domain specific languages.
>
> Haskell does all of this without macros.

Really?  Haskell folks working in a given domain never write similar
code multiple places?

-andy
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8fupg$ufd$1@registered.motzarella.org>
Dan Bensen schrieb:

> Markus, do you consider it "true and useful" to say that pattern-
> matching libraries are "Greenspunning"?  Do you think ML languages
> are "much more concise" than Lisp?  I don't think those statements
> are either true or useful.  I don't think libraries and simple macros
> are Greenspunning, and I don't think the ML family is "much" more
> concise than Lisp when you take macros into account.

Dan, can you tell me in what language the first ML was programmed in?
And in what language the first OCaml was programmed in?
And in what language the first Haskell was programmed in?
And in what language the first Pattern Matchers were used?


André
-- 
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ab84a0$0$1590$ed2619ec@ptn-nntp-reader02.plus.net>
André Thieme wrote:
> Dan, can you tell me in what language the first ML was programmed in?
> And in what language the first OCaml was programmed in?
> And in what language the first Haskell was programmed in?
> And in what language the first Pattern Matchers were used?

You may also be interested in what people are using these days...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8gcg2$4vm$1@registered.motzarella.org>
Jon Harrop schrieb:
> André Thieme wrote:
>> Dan, can you tell me in what language the first ML was programmed in?
>> And in what language the first OCaml was programmed in?
>> And in what language the first Haskell was programmed in?
>> And in what language the first Pattern Matchers were used?
> 
> You may also be interested in what people are using these days...

Well... Java, C, C++, C#, Visual Basic and PHP.
I think this is a point where most of us sit in one boat.
Probably a good number of readers of cll and clf prefer to not
code in these langs, if the don't have to.


André
-- 
From: Matthias Blume
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m28x8zris7.fsf@hanabi.local.i-did-not-set--mail-host-address--so-tickle-me>
André Thieme <······························@justmail.de> writes:

> Dan, can you tell me in what language the first ML was programmed in?
> And in what language the first OCaml was programmed in?
> And in what language the first Haskell was programmed in?
> And in what language the first Pattern Matchers were used?

André, can you tell me what the first Lisp was programmed in?

No, don't answer.  I'm just pointing out the irrelevance of your
questions.  MLs are no longer programmed in that language, and neither
are OCaml or Haskell.  (What would THAT tell you if you believed
the answers to your questions to tell you something?)

Finally, many things have been done in Lisp first -- which in part is
due to the fact that Lisp was one of the more decent languages of its
time, but certainly also due to the fact that many of the languages
that we use today for these things hadn't been invented back then.
(After all, Lisp was one of the very first programming languages in
existence.)

Cheers,
Matthias
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8hiah$e9m$1@registered.motzarella.org>
Matthias Blume schrieb:
> André Thieme <······························@justmail.de> writes:
> 
>> Dan, can you tell me in what language the first ML was programmed in?
>> And in what language the first OCaml was programmed in?
>> And in what language the first Haskell was programmed in?
>> And in what language the first Pattern Matchers were used?
> 
> André, can you tell me what the first Lisp was programmed in?
> 
> No, don't answer.  I'm just pointing out the irrelevance of your
> questions.  MLs are no longer programmed in that language, and neither
> are OCaml or Haskell.  (What would THAT tell you if you believed
> the answers to your questions to tell you something?)

Hello Mathias, please allow me to clear up what my message was:
It was said that Pattern Matching in Lisp would be "greenspunning".
However, it seems to me that Pattern Matching was done in Lisp before
many other programming languages were adopting it.


André
-- 
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xr6mrd2fj.fsf@ruckus.brouhaha.com>
André Thieme <······························@justmail.de> writes:
> Hello Mathias, please allow me to clear up what my message was:
> It was said that Pattern Matching in Lisp would be "greenspunning".
> However, it seems to me that Pattern Matching was done in Lisp before
> many other programming languages were adopting it.

I don't think that helps.  Assembly language was greenspunned before
Lisp was implemented.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ac703a$0$1620$ed2619ec@ptn-nntp-reader02.plus.net>
Paul Rubin wrote:
> I don't think that helps.  Assembly language was greenspunned before
> Lisp was implemented.

Exactly. You might as well say that the parts of the first jet engine were
assembled using a wrench and, therefore, the wrench is better than the jet
engine.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2007072910045327544-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-29 06:38:20 -0400, Jon Harrop <···@ffconsultancy.com> said:

> You might as well say that the parts of the first jet engine were
> assembled using a wrench and, therefore, the wrench is better than the jet
> engine.

lisp:wrench::ocaml:jet engine

riiiight....

hint: lisp is *already* a programming language. You can't fault it for 
not doing pattern matching when pattern matching was done in lisp 
first, and there exist a range of pattern matchers for common lisp in 
current use.
From: Frank Buss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1gwinzbiz8fn1$.iljqm13ttcwa.dlg@40tude.net>
Raffael Cavallaro wrote:

> lisp:wrench::ocaml:jet engine

I like wrenches. With a wrench you can build a jet engine, but with a jet
engine you can't build another jet engine.

-- 
Frank Buss, ··@frank-buss.de
http://www.frank-buss.de, http://www.it4-systems.de
From: Raymond Wiker
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m2tzrnp8h8.fsf@RawMBP.local>
Frank Buss <··@frank-buss.de> writes:

> Raffael Cavallaro wrote:
>
>> lisp:wrench::ocaml:jet engine
>
> I like wrenches. With a wrench you can build a jet engine, but with a jet
> engine you can't build another jet engine.

	I'd like to see you build a jet engine with only a wrench...
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-C4E80F.17231729072007@news-europe.giganews.com>
In article <·······························@40tude.net>,
 Frank Buss <··@frank-buss.de> wrote:

> Raffael Cavallaro wrote:
> 
> > lisp:wrench::ocaml:jet engine
> 
> I like wrenches. With a wrench you can build a jet engine, but with a jet
> engine you can't build another jet engine.

Well, Lisp software has been used to help designing Jet engines
(especially ICAD).
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8iq2f$ei0$1@aioe.org>
Rainer Joswig escreveu:
> In article <·······························@40tude.net>,
>  Frank Buss <··@frank-buss.de> wrote:
> 
>> Raffael Cavallaro wrote:
>>
>>> lisp:wrench::ocaml:jet engine
>> I like wrenches. With a wrench you can build a jet engine, but with a jet
>> engine you can't build another jet engine.
> 
> Well, Lisp software has been used to help designing Jet engines
> (especially ICAD).
But... just hold your breath a little that an Ocaml solution in a 
screenful of lines will be posted here that will outperform ICad and 
probably will lead to bankruptcy the ones who dare not switch soon enough!
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ad262c$0$1589$ed2619ec@ptn-nntp-reader02.plus.net>
Frank Buss wrote:
> I like wrenches. With a wrench you can build a jet engine, but with a jet 
> engine you can't build another jet engine.

Absolutely. I also wouldn't try to write programs using only a pattern
matcher in a language that wasn't Turing complete.

The interesting choice is between an old wrench with a lifetime supply of
free grease or a new wrench with a safety catch and a free jet engine and
some wings.

I am curious as to how people built the first jet engine and how modern-day
jet engines are built. I have toyed with an old wrench, read papers on jet
engine design and even built a jet engine using only a wrench. Mine isn't
as good as Matthias' but it got me where I needed to go and very
efficiently, thanks to the wonderful literature written by people like him.

Andre has stated repeatedly for seven months that old wrenches can be used
to build anything and that he could build a jet engine in only 1 week.

Dan dipped his wrench in kerosene, lit it and threw it. He claims that this
is effectively a jet engine but, objectively, he has yet to throw his
wrench anywhere near the speed of a jet plane. I am still trying to
determine whether or not he was being serious.

We have all had a go at building three different kinds of model aeroplane
now. We even measured weight and speed over a sample trip. Pascal
ingeniously circumvented the problem statement by using an aircraft carrier
to move his plane nearer to the finish line and then measured only the
flight time. Except for that, the planes built using premade jet engines
were consistently superior, being both lighter and faster. In fact, almost
all modern aeroplanes use jets and we even know that jet aeroplanes fly all
over the world.

Cesar is laughing at the idea that a jet-based model aeroplane might inspire
OAPs who still travel by bus. He may have a point.

Rainer noted that the wrench predates the jet engine and just referred to
the fact that an old wrench was once used in the construction of a jet
engine.

Matthias once used only an old wrench to build one of the world's most
advanced jet engines. He contributed enormously to mankind's knowledge of
jet engines. Yet, despite his vast intellect, he is trying to explain jet
engine design to a band of wrench-wielding cavemen who aren't really
listening.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2007073000114950878-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-29 19:32:12 -0400, Jon Harrop <···@ffconsultancy.com> said:

> a band of wrench-wielding cavemen

If that is what you think of lispers then an elegant solution presents itself:

go away.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ad0778$0$1587$ed2619ec@ptn-nntp-reader02.plus.net>
Raffael Cavallaro wrote:
> hint: lisp is *already* a programming language. You can't fault it for
> not doing pattern matching when pattern matching was done in lisp
> first,

Are you quite sure the first pattern matcher was written in Lisp and not,
say, assembler or Fortran?

> and there exist a range of pattern matchers for common lisp in current
> use. 

What significant Lisp projects pull in pattern matching libraries?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2007073000144877923-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-29 17:23:38 -0400, Jon Harrop <···@ffconsultancy.com> said:

> What significant Lisp projects pull in pattern matching libraries?

This is a lose lose for you Jon. If there are any, then you lose 'cause 
a significant lisp project used a lisp pattern matcher. If there 
aren't, then you lose 'cause all the significant work done in lisp 
didn't need pattern matching, and it isn't the be-all and end-all you 
make it out to be.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xfy36y0vq.fsf@ruckus.brouhaha.com>
Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> > What significant Lisp projects pull in pattern matching libraries?
> 
> This is a lose lose for you Jon. If there are any, then you lose
> 'cause a significant lisp project used a lisp pattern matcher. If
> there aren't, then you lose 'cause all the significant work done in
> lisp didn't need pattern matching, and it isn't the be-all and end-all
> you make it out to be.

I thought it was a legit question.  If there's none, then lispers are
reinventing the wheel over and over, sort of a Greenspun 11th law.
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2007073000394538165-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-30 00:24:41 -0400, Paul Rubin <·············@NOSPAM.invalid> said:

> If there's none, then lispers are
> reinventing the wheel over and over, sort of a Greenspun 11th law.

No, 'cause this assumes that pattern matching is somehow essential to 
all programming projects. There's no need to 'reinvent' something if 
it isn't actually necessary to the solution.

The fact, often repeated here and equally often ignored by Jon, is that 
there exist several pattern matching libraries in common lisp. The 
average lisper wouldn't bother reinventing pattern matching - certainly 
I wouldn't - I'd just load an existing library and get on with what I 
need/want it for.

Again, this has been rehashed several times in just the last year. Search 
google groups in c.l.l. and you'll see several examples of how simple 
it is to load and use an existing lisp pattern matcher. Certainly 
ocaml's built-in pattern matcher is faster, but unless all your code on 
all your projects spends an overwhelming majority of its time in 
pattern matching then choosing ocaml over lisp for this reason is just 
a very narrow case of premature optimization.
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8ib0u$g91$1@registered.motzarella.org>
Jon Harrop schrieb:
> Paul Rubin wrote:
>> I don't think that helps.  Assembly language was greenspunned before
>> Lisp was implemented.
> 
> Exactly. You might as well say that the parts of the first jet engine were
> assembled using a wrench and, therefore, the wrench is better than the jet
> engine.

Lisp is more a wrench with which you can build everything.
OCaml is a wrench that is more specialized. There are a few things one
can build with the OCaml wrench that can be build with fewer steps than
with the Lisp wrench, as long these objects one builds are small.
If these objects get too complicated the limited OCaml wrench is not
specialized anymore, while the Lisp wrench allows its users to spend
some time on specializing it further.
I am certain that you now understood that OCaml is only a specialized
wrench, good for a small amount of tasks. You also understand that the
OCaml wrench forces its users to work on each problem with the same
paradigm, however unfitting it might be.


André
-- 
From: Matthias Blume
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m2k5sjcivu.fsf@my.address.elsewhere>
André Thieme <······························@justmail.de> writes:

> Lisp is more a wrench with which you can build everything.
> OCaml is a wrench that is more specialized. There are a few things one
> can build with the OCaml wrench that can be build with fewer steps than
> with the Lisp wrench, as long these objects one builds are small.
> If these objects get too complicated the limited OCaml wrench is not
> specialized anymore, while the Lisp wrench allows its users to spend
> some time on specializing it further.
>
> I am certain that you now understood that OCaml is only a specialized
> wrench, good for a small amount of tasks. You also understand that the
> OCaml wrench forces its users to work on each problem with the same
> paradigm, how ever unfittig it might be.

Maybe Jon does, but I don't.  Can you explain this again?  "Small
amounts of tasks"?  What are you talking about?  Reading this one gets
the impression that Ocaml fills a niche, while there are vast amounts
of things one inherently cannot do in it.  I'd like to see some examples.

(Notice that your answer should not refer to specific techniques like
"you cannot write self-modifying code", or "you cannot make a list
that contains "three" and 3 without defining a datatype first" etc.
Those are means, not ends.  What are specific software end-products,
for which one would have to ditch Ocaml in favor of Lisp?)

Have you ever used Ocaml, or SML, or Haskell for a large program? Are
you speaking from experience?  I myself came from a Lisp background
and ended up ditching Lisp in favor of ML.  Actually, as your original
post points out, the entire ML community effectively comes from a Lisp
background but ditched it in favor of their respective new language.
Same goes for the Haskell community.  And I don't see these people
coming back to Lisp when the going gets tough.  If Lisp were this
all-powerful tool that, unlike other tools, can do everything, why
have a lot of very smart people flocked away from it?  Why are they
unlikely to come back?

Matthias
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8ig4a$1bg$1@registered.motzarella.org>
Matthias Blume schrieb:
> André Thieme <······························@justmail.de> writes:
> 
>> Lisp is more a wrench with which you can build everything.
>> OCaml is a wrench that is more specialized. There are a few things one
>> can build with the OCaml wrench that can be build with fewer steps than
>> with the Lisp wrench, as long these objects one builds are small.
>> If these objects get too complicated the limited OCaml wrench is not
>> specialized anymore, while the Lisp wrench allows its users to spend
>> some time on specializing it further.
>>
>> I am certain that you now understood that OCaml is only a specialized
>> wrench, good for a small amount of tasks. You also understand that the
>> OCaml wrench forces its users to work on each problem with the same
>> paradigm, how ever unfittig it might be.
> 
> Maybe Jon does, but I don't.  Can you explain this again?  "Small
> amounts of tasks"?  What are you talking about?  Reading this one gets
> the impression that Ocaml fills a niche, while there are vast amounts
> of things one inherently cannot do in it.

Sorry for my speling erors, I should proof read more often :-)

I said OCaml is not specialized anymore. I did not say that it is
impossible to do these things.
While OCaml is specialized on functional programming (with huge
trade offs for speed, see http://www.podval.org/~sds/ocaml-sucks.html )
Lisp is specialized on specializing, plus some more things.

What OCaml has over Lisp:
Static typing, one namespace, implicit currying and pattern matching.
What the OCaml system has over most Lisp implementations:
speed of execution.

Static typing can be done in Lisp as a Library (see Qi). I personally
would not want to use that feature. Most parts of interesting programs
are kinda dynamically typed anyways.
One namespace and implicit currying is trivial and can be done in one
day. A good pattern matcher will take one week.
For me personally that means: all interesting things that OCaml offers
I can also use in Lisp. I could also get the syntax if I wanted it
(which I don't):
(ocamlet bar = (fun l -> ListLabels.fold_left ~init:0 ~f:(fun s (_,x) -> 
s + x) l))
vs
let bar = (fun l -> ListLabels.fold_left ~init:0 ~f:(fun s (_,x) -> s + 
x) l)


> (Notice that your answer should not refer to specific techniques like
> "you cannot write self-modifying code", or "you cannot make a list
> that contains "three" and 3 without defining a datatype first" etc.
> Those are means, not ends.  What are specific software end-products,
> for which one would have to ditch Ocaml in favor of Lisp?)

Why not forbid even more things?
Well, for the domain of functional programming OCaml is fine. With its
static typing it is well suited for well defined tasks.
However, in the fields I am interested in there are no well defined
problems. I need to learn more about the problem while working on it.
To do that I want to get rid of the edit->compile->run cycle. I want
dynamic typing. I want an organic environment that I can change and that
can change itself during runtime. I want to be able to do imperative
programming, I want an object system, I want to be able to solve parts
of my problem in a declarative style with logic programming, etc.
So basically for most of what I want to do Lisp could be a good starting
point.


André
-- 
From: Matthias Blume
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m2fy37c9n3.fsf@my.address.elsewhere>
André Thieme <······························@justmail.de> writes:

> While OCaml is specialized on functional programming (with huge
> trade offs for speed, see
> http://www.podval.org/~sds/ocaml-sucks.html )

I hadn't seen this site before.  Now that I have, let me say that at
least half the things it complains about are good things in my book.

> Static typing can be done in Lisp as a Library (see Qi).

No, that is a fallacy.  Qi does not add static typing to Lisp.  It
implements static typing in a separate language which happens to sit
on top of Lisp.  The same can be done in any other Turing-complete language.

> I personally
> would not want to use that feature. Most parts of interesting programs
> are kinda dynamically typed anyways.

I beg to differ.  In my experience, most parts of interesting programs
are statically typed.

> Why not forbid even more things?

The point is that one does things differently in ML-like languages or
in Haskell-like languages.  In other words, there is nothing that
can't be done, but there are lots of things that can't be done THE
SAME WAY.  The latter is, of course, by design.  Complaining about
static types does not lead anywhere.  People who use ML-like languages
LIKE static types.  Trying to convince me that ML sucks because of
static types leads nowhere.  (I have given up on Lisp precisely
because it does NOT provide the same kind of static guarantees that
ML-like languages provide.)

> Well, for the domain of functional programming OCaml is fine. With its
> static typing it is well suited for well defined tasks.
> However, in the fields I am interested in there are no well defined
> problems. I need to learn more about the problem while working on
> it.

Have you actually worked with ML-like languages?  (I mean, for more
than a toy project?)  I personally discovered that /especially/ when
programming in exploratory mode, the presence of a static type system
can be of /tremendous/ help -- provided you have enough experience to
use it to your advantage.

Matthias
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8in1f$52k$1@online.de>
André Thieme schrieb:
> One namespace and implicit currying is trivial and can be done in one
> day. A good pattern matcher will take one week.

I think you're underrating the effort by at least an order of magnitude. 
Whipping something up might be as little as you said, but making it 
*good* and integrating it well with the rest of the system takes a lot 
more time.
IOW it may be technically simple, but getting the ergonomy right takes 
experimentation and (lots of) time.

> For me personally that means: all interesting things that OCaml offers
> I can also use in Lisp. I could also get the syntax if I wanted it
> (which I don't):

That's what I mean: setting up something that technically does some 
OCamlisms but doesn't give Lisp programmers anything is simple.
Setting it up in a way that's interesting even in a Lisp context is far 
more difficult.

>> (Notice that your answer should not refer to specific techniques like
>> "you cannot write self-modifying code", or "you cannot make a list
>> that contains "three" and 3 without defining a datatype first" etc.
>> Those are means, not ends.  What are specific software end-products,
>> for which one would have to ditch Ocaml in favor of Lisp?)
> 
> Why not forbid even more things?

He said why not.

> However, in the fields I am interested in there are no well defined
> problems. I need to learn more about the problem while working on it.
> To do that I want to get rid of the edit->compile->run cycle.

You can do that in e.g. Haskell.

 > I want dynamic typing.

The question is: do you *need* it?
In a language with type inference, you can work almost as freely as in a 
language with dynamic typing. The compiler will carp only if you write 
something self-contradictory, such as

   "asdf" + 15

You can't have polymorphic lists directly, but there are straightforward 
ways around that.

 > I want an organic environment that I can change and that
> can change itself during runtime.

What for?

 > I want to be able to do imperative
> programming, I want an object system, I want to be able to solve parts
> of my problem in a declarative style with logic programming, etc.

All of this can be done in OCaml.

> So basically for most of what I want to do Lisp could be a good starting
> point.

I think you have done a great deal of self-adaptation to what Lisp offers.
I.e. if all you have is a multitool, your problems start to look like 
you need a multitool.

Regards,
Jo
From: Kent M Pitman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <uabtfuhgk.fsf@nhplace.com>
[ comp.lang.lisp only
  http://www.nhplace.com/kent/PFAQ/cross-posting.html ]

Joachim Durchholz <··@durchholz.org> writes:

> > I want an organic environment that I can change and that
> > can change itself during runtime.
> 
> What for?

I started to write an explanation, but I realized it had all been said 
before so I'll just offer the relevant URL ...

http://www.ushistory.org/franklin/quotable/quote04.htm
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ad2e4f$0$1611$ed2619ec@ptn-nntp-reader02.plus.net>
Kent M Pitman wrote:
> Joachim Durchholz <··@durchholz.org> writes:
>> What for?
> 
> I started to write an explanation, but I realized it had all been said
> before so I'll just offer the relevant URL ...
> 
> http://www.ushistory.org/franklin/quotable/quote04.htm

Yet you sacrificed the freedom to write fast code in assembler for the
safety of Lisp.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Kent M Pitman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ur6mqiptg.fsf@nhplace.com>
Jon Harrop <···@ffconsultancy.com> writes:

> Kent M Pitman wrote:
> > Joachim Durchholz <··@durchholz.org> writes:
> >> What for?
> > 
> > I started to write an explanation, but I realized it had all been said
> > before so I'll just offer the relevant URL ...
> > 
> > http://www.ushistory.org/franklin/quotable/quote04.htm
> 
> Yet you sacrificed the freedom to write fast code in assembler for the
> safety of Lisp.

Uh, nice try, but the use of old Jedi mind tricks doesn't work here.

You know nothing about the languages I do program in and have
programmed in, and consequently your remark is necessarily uninformed.
It reads more like a desperate attempt by someone with nothing to add
to get in a dig where none is called for.

I should observe as a matter of history, by the way, that there have
been many assembly languages in Lisp, and that the Lisp Machine itself
was Lisp all the way down into the hardware.  (And no, it didn't go
away because it was slow; it went away for business positioning reasons
unrelated to its technical nature.)

About the only thing that seems right in your feeble attempt to be
cute, and I suspect only accidentally since you sound like you were
trying hard to sound ironic, is that I agree that "freedom" and
"safety" are enemies of one another.  Freedom is a close relative of
flexibility, and safety is its enemy.

For example, this is why it's dangerous to refer to the US as the
"safest country on earth" (as I hear some people saying).  It at least
strives to be the most free, but one should understand that such
freedom comes at the cost of safety, which is the foundation of
Franklin's remark.  The Big Gamble which is the US is whether prior
restraint (betting on the badness of people, locking them up before a
crime, holding them back from anything that is not previously proven
to be good) is a better theory of governance than letting people rise
to their own level (at the possible cost of safety).

Further a more specific example, we reserve to the people the right to
assembly (if you'll pardon my inadvertent use of "assembly language"
here), not because we know it to be safe, but because we know hope
that it will be safe, and because we pray (even those of us who are
not religious) that people will more often do good than bad in such
cases.  At any point where we forget that the rights we guarantee are
not inherently safe, but rather are gambles on safety, and where we
begin to fear that advance proof should be required, we will have lost
our freedom.

In that light, the choice to use Lisp, and its flexibility, is
knowingly unsafe in some regards, since there are ways to achieve
small additional forms of safety here and there by using something
else... it's just that there is a casualty in the process: the loss of
the possible upside that comes in the gap between what is provably
possible and what is theoretically possible.

And finally, as to the question of freedom itself, which you have
implicitly called into question through your remark: I ask myself--WWFS?
("What would Franklin say?")  I imagine he'd say something to the 
effect of:

 "Freedom is not sacrificed by the choice to avail oneself of the
  legitimate options it presents.  Nor does freedom require one to
  observe consistency of choice, but rather the opposite: Consistency
  of choice requires freedom; otherwise, it is not consistency but
  mere consequence, and certainly then is no longer choice at all."
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ad5f5d$0$1602$ed2619ec@ptn-nntp-reader02.plus.net>
Kent M Pitman wrote:
> It reads more like a desperate attempt by someone with nothing to add
> to get in a dig where none is called for...

Franklin's propaganda was irrelevant because there is no useful notion
of "freedom" in this context. You might as well cite this:

http://www.presentationhelper.co.uk/winston_churchill_speech_fight_them_on_beaches.htm

and imply that dynamic typing is the beach. It doesn't make any sense. Its
just crazy talk...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8iqkf$fcc$1@aioe.org>
Joachim Durchholz escreveu:
> André Thieme schrieb:
[snipped]

>  > I want to be able to do imperative
>> programming, I want an object system, I want to be able to solve parts
>> of my problem in a declarative style with logic programming, etc.
> 
> All of this can be done in OCaml.
> 
>> So basically for most of what I want to do Lisp could be a good starting
>> point.
> 
> I think you have done a great deal of self-adaptation to what Lisp offers.
> I.e. if all you have is a multitool, your problems start to look like 
> you need a multitool.
> 
Joachim,

You really are asserting that OCaml has figured out a programming 
paradigm which encompasses all the problems software engineers have to 
cope with on a day-to-day basis?
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural     language Minim
Date: 
Message-ID: <f8k35v$hlq$1@online.de>
Cesar Rabak schrieb:
> Joachim Durchholz escreveu:
>> André Thieme schrieb:
> [snipped]
> 
>>> I want to be able to do imperative
>>> programming, I want an object system, I want to be able to solve parts
>>> of my problem in a declarative style with logic programming, etc.
>>
>> All of this can be done in OCaml.
>>
>>> So basically for most of what I want to do Lisp could be a good starting
>>> point.
>>
>> I think you have done a great deal of self-adaptation to what Lisp 
>> offers.
>> I.e. if all you have is a multitool, your problems start to look like 
>> you need a multitool.
> 
> You really are asserting that OCaml has figured out a programming 
> paradigm which encopasses all the problems software engineers have to 
> cope in a day-to-day basis?

Yes.

Take OO for an example. It's a nice and dandy feature, but FPL 
programmers have found that they don't really need it. The usual 
consensus over herer in comp.lang.functional is that in most cases, 
objects are just a clumsy way to express closures; in those cases where 
OO is more than that, they can do the same using a few records of 
closures; and the remaining cases are uninteresting because they use 
exactly those features of OO that are dangerous (open recursion).
I once took part in a library standardization effort for an OO language 
(the string library for Eiffel). Various subtle incompatibilities 
between the string libraries of different vendors had created the need 
to nail down the library's semantics, once and for all, using 
assertions. One of the lessons that I took home was that those 
assertions had better been written in a pure FPL, that the 
postconditions would have already been the implementation in a pure FPL, 
and that a really complete set of postconditions would leave no room for 
meaningful variations in subclasses, so by nailing down the semantics of 
a class you effectively close it against subclassing (the only variation 
that's left is implementation, which doesn't need open recursion, or 
extending the set of attributes, which can be done using aggregation).

In other words, I have experienced strong evidence that OO as such 
doesn't give you anything that you really need, even if you want it.
(That's OO that uses implementation inheritance. Subtyping is still 
useful IMHO, but you don't need implementation inheritance to do that 
effectively.)

I had entered that standardization effort as a firm believer that OO is 
a Good Thing, and I Need it (and others too).
I came out of it realizing that OO Just Doesn't Work As Advertized, and 
that I had been shoehorning all problems into terms of OO. (I.e. all 
problems looked like they needed that special OO multitool, to me.)

Anybody who insists that he needs OO in his toolkit is suspect of doing 
the same.


I don't really know whether the same kind of observation holds for 
macros or not.
I do see that a lot of things that you need macrology for in Lisp 
can easily be done in any FPL using higher-order functions. (Also in 
Lisp, it's just that macros would be preferred.)
I do suspect that what's remaining isn't very interesting, though I'd 
have to invest a few years of actual programming practice in Lisp to say 
for sure. (Nobody is going to pay me for this, so I'm unable to test 
that hypothesis. The only people qualified to answer that question would 
be those who moved from a non-Lisp FPL to a Lisp one and who have to 
offer more than just advocacy - are there any out there? Some of those 
who moved in the other direction have already spoken, though none has 
elaborated yet on what exactly they found better and what they found worse.)

Regards,
Jo
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <2007073000234337709-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-29 14:42:10 -0400, Joachim Durchholz <··@durchholz.org> said:

> In a language with type inference, you can work almost as freely as in 
> a language with dynamic typing.

And if the limited correctness proofs and increased performance are 
worth this inconvenience - 'almost as freely' isn't the same as 'as 
freely' in exploratory programming - then you'll choose a modern 
statically typed language. If the inconvenience is not worth it to you, 
you'll stick with lisp.

This isn't advanced mathematics people. The trade offs are obvious. 
People who have used lisp and ocaml (for example) and continue to use 
lisp obviously value the convenience and dynamism more than the limited 
correctness proofs and some performance gains. Those who continue to 
use ocaml (for example) obviously value the limited correctness proofs 
and performance gains more.

What strikes the correspondents of c.l.l as sleazy is Jon's pretending 
to be interested in lisp, but really only trying to sell his consulting 
services in a newsgroup that has greater traffic than that for *all* 
functional languages combined.

The greater traffic of c.l.l. might also suggest something about which 
things more programmers value.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7xbqduy077.fsf@ruckus.brouhaha.com>
Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> This isn't advanced mathematics people. The trade offs are
> obvious. People who have used lisp and ocaml (for example) and
> continue to use lisp obviously value the convenience and dynamism more
> than the limited correctness proofs and some performance gains.

Are there are a lot of these?

> Those who continue to use ocaml (for example) obviously value the
> limited correctness proofs and performance gains more.

I don't know if I'd dignify the benefits of ML-style static typing
with a fancy term like "limited correctness proof".  I'd say something
more like: language-verified type consistency gives the program
something like a rigid skeleton on which to build its functionality.
Lisp and Python programs have always evoked sort of a flopping,
"invertebrate" sensation for me.  Alan Perlis's SICP foreword famously
said:

    Pascal is for building pyramids -- imposing, breathtaking, static
    structures built by armies pushing heavy blocks into place. Lisp is
    for building organisms -- imposing, breathtaking, dynamic structures
    built by squads fitting fluctuating myriads of simpler organisms into
    place.

ML-like languages, I like to imagine, aim for something in the middle:
graceful spires and leaping archways built on solid foundations
through precise engineering.

> The greater traffic of c.l.l. might also suggest something about which
> things more programmers value.

clf is mostly a theory discussion group, I think.  Lisp has been
around a long time and has more users, and languages like CL have a
lot more "creature comforts" (e.g. debugging environments, language
features like keyword args) than the ML family.  But really, CL and
even Scheme are 1970's languages, and things have been happening since
then.
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <2007073000530584492-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-30 00:39:24 -0400, Paul Rubin <·············@NOSPAM.invalid> said:

> But really, CL and
> even Scheme are 1970's languages, and things have been happening since
> then.

Yes, but imho they've been moving in precisely the wrong direction. 
Modern statically typed languages found their birth at a time when 
interactive computing as we know it today was not the norm. The 
argument has been made [1, 2, 3] that the future of computing is no 
longer of the model:

known range of inputs -> algorithmic processing -> known range of outputs

but rather:

partially known range of inputs -> combination of human interactive and 
algorithmic processing -> unpredictable range of outputs


If this is the future of computing, then the focus on static typing is 
a massive effort in solving the wrong problem.

1. http://www.cse.uconn.edu/~dqg/papers/cacm02.rtf

2. http://www.cse.uconn.edu/~dqg/papers/cie05.pdf

3. http://www.cse.uconn.edu/~dqg/inter_book.html
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7xfy368mly.fsf@ruckus.brouhaha.com>
Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> 1. http://www.cse.uconn.edu/~dqg/papers/cacm02.rtf
> 2. http://www.cse.uconn.edu/~dqg/papers/cie05.pdf
> 3. http://www.cse.uconn.edu/~dqg/inter_book.html

I don't see what these three links have to do with static vs dynamic
types in programming languages.  Certainly, tons of interactive
programs are written in C and Java.  The idea that the interactive
computation model is really different is a bit dubious too; see how
Haskell uses monads to implement sequenced computation in a functional
way.

I've written a lot more Lisp code than I've written Haskell or ML, so
maybe what I'm hoping for from static types is wishful thinking.  But
I've made way more runtime type errors in Lisp than I'm happy about,
that could have been caught at compile time with a static language.
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <2007073011114822503-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-30 01:52:09 -0400, Paul Rubin <·············@NOSPAM.invalid> said:

> I don't see what these three links have to do with static vs dynamic
> types in programming languages.

This reply is to you as well as Matthias and Markus.

No one doubts that *after* parsing inputs one can have a statically 
typed expression to evaluate. But you still need to do run-time checks 
(i.e., the input parsing) and you must work to avoid forcing the users 
into a narrow input channel - making them choose only a narrow subset 
of possible inputs because that's all your parser and statically type 
checked code were designed to handle. And what happens when you want 
to add more valid input types to a running system?
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7xy7gx7w2c.fsf@ruckus.brouhaha.com>
Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> No one doubts that *after* parsing inputs one can have a statically
> typed expression to evaluate. But you still need to do run-time checks
> (i.e., the input parsing) and you must work to avoid forcing the users
> into a narrow input channel - making them choose only a narrow subset
> of possible inputs because that's all your parser and statically type
> checked code were designed to handle. And what happens when you wan't
> to add more valid input types to a running system?

I just can't think of many instances where I'd want to do anything
like the above.  The thing I'm working on now is written in a
dynamic language (Python) yet all its user input can be described
statically.
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <2007073102000275249-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-30 11:25:31 -0400, Paul Rubin <·············@NOSPAM.invalid> said:

> I just can't think of many instances where I'd want to do anything
> like like the above.  The thing I'm working on now is written in a
> dynamic language (Python) yet all its user input can be described
> statically.

Then, with respect, you may not be working on a very dynamic problem. 
What if the user community invents a new process, or new input types, 
or their interaction results in new types of outputs? Your site has to 
go down in order to add this (or does python have some equivalent of 
update-instance-for-redefined-class).
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7xhcnlt6x9.fsf@ruckus.brouhaha.com>
Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> Then, with respect, you may not be working on a very dynamic
> problem. What if the user community invents a new process, or new
> input types, or their interaction results in new types of outputs?

I don't understand this.  The inputs and outputs are all characters.
Are they going to invent new kinds of characters?  We already handle
unicode.
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <2007073102425450878-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-31 02:37:38 -0400, Paul Rubin <·············@NOSPAM.invalid> said:

> I don't understand this.  The inputs and outputs are all characters.
> Are they going to invent new kinds of characters?  We already handle
> unicode.

1. I was speaking of somewhat higher level, more abstract types than characters.

2. What happens when your users want to input/output videos, or images, 
or music, or ... ?
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7xmyxdkql4.fsf@ruckus.brouhaha.com>
Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> 2. What happens when your users want to input/output videos, or
> images, or music, or ... ?

Well, those are files, I'd say, i.e. blobs of characters that are
decorated with more blobs of characters (filename, description...)
that say what they are.  Anyway to answer your other question, yes, we
stop the application in order to change the software.  I don't see any
need for anything as messy as hot patching.  At the moment we simply
tolerate a brief service outage when we take the server down, but once
we get our act together better we'll handle it by switching to a
backup server through a normal failover operation so there's no
outage.  We don't have any significant per-session server-side state
needing to be saved, but even if we did, propagating it to a backup
server is not all that complex an operation.
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <2007073109063677923-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-31 02:57:43 -0400, Paul Rubin <·············@NOSPAM.invalid> said:

> Well, those are files, I'd say, i.e. blobs of characters that are
> decorated with more blobs of characters

This is just the reductio ad absurdum union type case of 
big-bag-of-bits, not a static type - it doesn't allow static analysis 
at compile time because you must check what's in the bag-o-bits at 
runtime, and you can't possibly know what to check for at compile time 
if your application evolves to include *new* types while it's running.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7x7iog1m4f.fsf@ruckus.brouhaha.com>
Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> This is just the reductio ad absurdum union type case of
> big-bag-of-bits, not a static type - it doesn't allow static analysis
> at compile time because you must check what's in the bag-o-bits at
> runtime, and you can't possibly know what to check for at compile time
> if your application evolves to include *new* types while it's running.

Just like my office-mate doesn't grow new arms, legs, or heads while
she's coding, the application we're working on doesn't evolve new
types while it's running.  One could of course imagine science-fiction
scenarios in which both of the above happen, but plenty of things
would then be a lot different in both cases.  We get by the way things
are.
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <2007080109375264440-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-31 14:09:04 -0400, Paul Rubin <·············@NOSPAM.invalid> said:

> We get by the way things
> are.

This of course could be said of Visual Basic too. Again, the discussion 
is about what's best, not what you can get by with.
From: Alain Picard
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <874pjj4e38.fsf@memetrics.com>
Paul Rubin <·············@NOSPAM.invalid> writes:

> Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
>> if your application evolves to include *new* types while it's running.
>
> Just like my office-mate doesn't grow new arms, legs, or heads while
> she's coding, the application we're working on doesn't evolve new
> types while it's running.

And yet in Common Lisp it's quite possible to imagine an application
evolving new types as it runs.  Heck, from one viewpoint, that's
even the _normal_ way it works: your application (the lisp image)
evolves as you load into it new functionality (fasls containing
type definitions).
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7xejini0hj.fsf@ruckus.brouhaha.com>
Alain Picard <············@memetrics.com> writes:
> > Just like my office-mate doesn't grow new arms, legs, or heads while
> > she's coding, the application we're working on doesn't evolve new
> > types while it's running.
> 
> And yet in Common Lisp it's quite possible to imagine an application
> evolving new types as it runs.  Heck, from one viewpoint, that's
> even the _normal_ way it works: your application (the lisp image)
> evolves as you load into it new functionality (fasls containing
> type definitions).

Yes, the thing I'm doing is in Python, which could also do that if we
wanted to (using "import").  I think the class of applications where
that's really useful is somewhat limited.  If you're trying to run a
high-reliability service, you MUST handle the case where the CPU
catches on fire without stopping the service (i.e. you seamlessly
switch the operation to failover hardware).  If you can do that, then
you don't need to hot-patch, just load the new code onto the failover
hardware, then switch.  If your reliability requirements are not such
that you need to handle the CPU catching on fire, then by definition
you can tolerate a little bit of downtime for occasional software
restarts.

There's considerable benefit from using source control and not running
stuff on a production server until it's been tested on a staging
server, preferably by an automatic build that starts with a complete
checkout from the source repository.  This dynamic patching stuff
sounds way too haphazard by comparison.

The main reason to want to hot patch IMO is because you have a lot of
live data or connections inside the server that you don't want to lose
in a restart.  Typical apps these days (including the one I'm doing)
store the persistent data in a database (separate process), giving
separation of concerns so you can restart the application without
losing the data.  Since we're doing a web app we don't worry about
dropping TCP connections, we just let the clients automatically reopen
them.  HTTP is basically connectionless and keepalive is an optional
performance hack.  Even if we had to keep connections up, on Unix
systems (certainly on Linux) there's usually a mechanism for migrating
connections from one process to another, so you'd start the new server
in a new process and migrate the connections.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <joswig-EA9107.20485201082007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Alain Picard <············@memetrics.com> writes:
> > > Just like my office-mate doesn't grow new arms, legs, or heads while
> > > she's coding, the application we're working on doesn't evolve new
> > > types while it's running.
> > 
> > And yet in Common Lisp it's quite possible to imagine an application
> > evolving new types as it runs.  Heck, from one viewpoint, that's
> > even the _normal_ way it works: your application (the lisp image)
> > evolves as you load into it new functionality (fasls containing
> > type definitions).
> 
> Yes, the thing I'm doing is in Python, which could also do that if we
> wanted to (using "import").  I think the class of applications where
> that's really useful is somewhat limited.  If you're trying to run a
> high-reliability service, you MUST handle the case where the CPU
> catches on fire without stopping the service (i.e. you seamlessly
> switch the operation to failover hardware).  If you can do that, then
> you don't need to hot-patch, just load the new code onto the failover
> hardware, then switch.  If your reliability requirements are not such
> that you need to handle the CPU catching on fire, then by definition
> you can tolerate a little bit of downtime for occasional software
> restarts.
> 
> There's considerable benefit from using source control and not running
> stuff on a production server until it's been tested on a staging
> server, preferably by an automatic build that starts with a complete
> checkout from the source repository.  This dynamic patching stuff
> sounds way too haphazard by comparison.
> 
> The main reason to want to hot patch IMO is because you have a lot of
> live data or connections inside the server that you don't want to lose
> in a restart.  Typical apps these days (including the one I'm doing)
> store the persistent data in a database (separate process), giving
> separation of concerns so you can restart the application without
> losing the data.  Since we're doing a web app we don't worry about
> dropping TCP connections, we just let the clients automatically reopen
> them.  HTTP is basically connectionless and keepalive is an optional
> performance hack.  Even if we had to keep connections up, on Unix
> systems (certainly on Linux) there's usually a mechanism for migrating
> connections from one process to another, so you'd start the new server
> in a new process and migrate the connections.

One area where I like it especially:

* during development. The whole idea of interactive development
  depends on that. This gives a huge productivity
  advantage.

* To deliver patches to the customer. Some of the commercial
  Lisps only send you a complete new development
  environment once a (few) year(s) or so. The rest is then
  delivered as patches that are loaded into the
  environment on demand. You don't even need to quit
  the environment for that. Since you can change most
  of the application, it is not needed to reinstall
  the application or to recompile it. This can
  also be used for delivered applications.

-- 
http://lispm.dyndns.org
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <46af6ca0$0$1612$ed2619ec@ptn-nntp-reader02.plus.net>
Raffael Cavallaro wrote:
> This is just the reductio ad absurdum union type case of
> big-bag-of-bits, not a static type - it doesn't allow static analysis
> at compile time because you must check what's in the bag-o-bits at
> runtime, and you can't possibly know what to check for at compile time
> if your application evolves to include *new* types while it's running.

Yes, static typing is irrelevant here.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Matthias Blume
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <m2ir80vtga.fsf@my.address.elsewhere>
Jon Harrop <···@ffconsultancy.com> writes:

> Raffael Cavallaro wrote:
>> This is just the reductio ad absurdum union type case of
>> big-bag-of-bits, not a static type - it doesn't allow static analysis
>> at compile time because you must check what's in the bag-o-bits at
>> runtime, and you can't possibly know what to check for at compile time
>> if your application evolves to include *new* types while it's running.
>
> Yes, static typing is irrelevant here.

This is, of course, not true.  Check out, e.g., Andreas Rossberg's
work on Alice ML for possible approaches.
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <vC%ri.21220$j7.380604@news.indigo.ie>
Matthias Blume wrote:

>>
>> Yes, static typing is irrelevant here.
> 
> This is, of course, not true.  Check out, e.g., Andreas Rossberg's
> work on Alice ML for possible approaches.


However, it must be noted that  Alice ML /ceases to be/ pure static
typing.

I think a large part of the problem comes from the use of the
word "static". Lispers will predictably summarily reject "pure static"
typing, fixating on nonavailability-at-runtime. (remember in lisp, the
evaluator, macroexpander, compiler, etc. are conventionally available
at run-time - it seems near nonsense for any associated type system not
to be similarly so in that light).

Maybe if you said "expressive and comprehensive type system" or
something...  But using the word "static" as some sort of shorthand
for "formalised type theory" is highly counterproductive if trying to
communicate utility of type theories beyond compile-time checking
(checking, if you recall, that lispers already do in modern lisp
implementations to some extent and usually accept as a "good thing"). 
It's far too easy for people to think "static" implies "static only"
(or for that matter that "dynamic" implies "dynamic only") when recent
(and not so recent at least in the lisp case) developments in both
camps are toward various attempted syntheses of the two.  "gradual
typing" and so forth.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <46b0a1c6$0$1607$ed2619ec@ptn-nntp-reader02.plus.net>
Matthias Blume wrote:
> This is, of course, not true.  Check out, e.g., Andreas Rossberg's
> work on Alice ML for possible approaches.

Did Alice ML introduce anything relevant not found in HashCaml or F#? I
can't see how it can statically type something inherently dynamic...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Stephen J. Bevan
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <87sl72ljk7.fsf@dnsalias.com>
Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> No one doubts that *after* parsing inputs one can have a statically
> typed expression to evaluate. But you still need to do run-time checks
> (i.e., the input parsing) and you must work to avoid forcing the users
> into a narrow input channel - making them choose only a narrow subset
> of possible inputs because that's all your parser and statically type
> checked code were designed to handle. And what happens when you wan't
> to add more valid input types to a running system?

I don't know what type of running system you mean so I'll use an
example of a IP firewall.  In this case the requirement is that you
can't take it down but also that it should not be a single point of
failure.  Thus there are at least two firewalls running acting as a
cluster.  To "add more input types" one upgrades the secondary server
with the new code which supports the new types and where necessary
contains upgrade code to convert from the old type to the new types
and then cause a failover which switches traffic to the new box.  You
can now upgrade the other box and if desired, cause a failover to make
it the primary again.  This approach also means it is possible to
restart either box with the new configuration when it is a secondary
without disrupting traffic thereby ensuring that should a restart
actually happen, it does re-start correctly.  Why do that?  Well
customers tend to get rather annoyed if after a hot deploy they run
fine only to have a problem on reboot (say due to complete power
outage) because the hot deploy doesn't work right.

Of course you don't have to do any of that and you can just hot deploy
to a live system making use of whatever type system support you can
get.  The right call depends on your customer expectations, the
quality of your code and QA process.
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <2007080200471643042-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-08-01 23:08:42 -0400, ·······@dino.dnsalias.com (Stephen J. 
Bevan) said:

> To "add more input types" one upgrades the secondary server
> with the new code

And you can perform this upgrade without bringing the secondary server 
down (you can with lisp). If not, during the upgrade, the primary *is* 
a single point of failure.
From: Stephen J. Bevan
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <87odhqkoqj.fsf@dnsalias.com>
Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> On 2007-08-01 23:08:42 -0400, ·······@dino.dnsalias.com (Stephen
> J. Bevan) said:
>
>> To "add more input types" one upgrades the secondary server
>> with the new code
>
> And you can perform this upgrade without bringing the secondary server
> down

Yes, replace "the secondary" with "a secondary" i.e. there can be more
than two nodes in the cluster.  Whether the customer is willing to pay
for that depends on whether their cost:benefit analysis shows it is
worth the additional cost to avoid a single point of failure in the
seconds it takes to upgrade a secondary.

> (you can with lisp).

You can with any language, it is a matter of the cost:benefit ratio.
Lisp, along with other dynamic languages such as Smalltalk, make *some* of
the costs negligible but has no impact on others.  If the others are
what concerns the customer then the language choice is immaterial.
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <200708021036267987-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-08-02 10:14:30 -0400, ·······@dino.dnsalias.com (Stephen J. 
Bevan) said:

> You can with any language, it is a matter of the cost:benefit ratio.
> Lisp, along with other dynamic languages such Smalltalk, make *some* of
> the costs negligible but has no impact on others.

I think you miss the point here: with a true dynamic language you can 
perform an upgrade on a running server, while it is still running, 
without bringing it down.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7xsl72osmo.fsf@ruckus.brouhaha.com>
Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> I think you miss the point here: with a true dynamic language you can
> perform an upgrade on a running server, while it is still running,
> without bringing it down.

I think this is also done in java using class loaders, but I'm still
missing why it's important.  And somewhere deep down there's probably
a semantic quibble about what a "running server" is.  I.e. it
certainly should be ok to organize a server into multiple processes,
so I don't see why an upgrade process that involves starting new
processes to replace old ones should be a problem.
From: Kent M Pitman
Subject: tired of shootouts, cross-posting, and closed minds
Date: 
Message-ID: <ulkcs184d.fsf_-_@nhplace.com>
[ comp.lang.lisp only; http://www.nhplace.com/kent/PFAQ/cross-posting.html ]

Paul Rubin <·············@NOSPAM.invalid> writes:

> Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> > I think you miss the point here: with a true dynamic language you can
> > perform an upgrade on a running server, while it is still running,
> > without bringing it down.
> 
> I think this is also done in java using class loaders, but I'm still
> missing why it's important.  And somewhere deep down there's probably
> a semantic quibble about what a "running server" is.  I.e. it
> certainly should be ok to organize a server into multiple processes,
> so I don't see why an upgrade process that involves starting new
> processes to replace old ones should be a problem.

I think a server is a pessimal situation to discuss this because
servers are almost always designed to have all their data secure when
they crash, and so it is nearly impossible to design a situation in
which not losing your in-memory data and/or its spread out, cached,
and incidentally organized state is a loss.

Consequently, I think using this as an example will only work to those
with open minds (looking to listen for how something could happen in a
certain circumstance, even if it might not necessarily happen that way
under all circumstances) not those with closed minds (only willing to
listen when they are shown that their own point of view and preferred
scenario will not work under any scenario).

However, there are many such interactive situations developing code
where the reason the stuff is not in databases is that it's only
half-devised, and where reloading new program without losing the data
state is critical to efficient debugging, and moreover where the
ability to redesign paradigms is not just incidental but core to how
Lispers, at least, are used to debugging.

Can there be another form of developing/debugging than Lispers use?
Surely.  Every successful language evolves a culture around the tools
it offers and the techniques it makes easy, turning those things it
makes hard into things to be feared by its users.  That rarely proves
much of anything about those other things at an intrinsic level, I
suspect; more often it just explains people's natural nervousness
about the unfamiliar.  I do think it's a bit haughty of the non-Lisp
camp in this discussion to presume that the Lisp camp is filled with
people speaking out of ignorance, though; some of us have used these
other languages under discussion and simply don't prefer them, their
touted virtues notwithstanding.

In the end, "proving" that language X is not the language anyone
should use because you can find a user who purports to believe that
language Y lacks for nothing in any programming circumstance he has
either encountered or can conceive of is not very much of a proof.
See Clarke's First Law 
 [ http://en.wikipedia.org/wiki/Clarke's_three_laws ] 
for a catchier rendering of this observation.

And, I emphasize, to underscore the asymmetry of this entire
discussion, that I have at no time asserted that any of the functional
languages is incapable of doing things interesting to someone.  Nor
have many others here.  Rather, one individual person has tried to
orchestrate a debate where none was requested.  The fact that the term
"shootout" was used is provocative just at the outset, and the rest of
the followup has been similarly passive-aggressive, or sometimes even
actively so.  People who would otherwise prefer to remain silent are
being called upon to speak, lest their silence give implicit consent
to a firehose of misstatements.  And yet, our group is not as
interested in evangelizing as I think has been perceived on the larger
distribution (including the other half of this discussion that I have
elected not to cross-post to).

What I've personally said is not that someone else should find fault
in functional languages, but that I personally just prefer Lisp, and
have not found the features of functional languages personally
compelling, even when I've used them seriously on real work projects.
By contrast, I've seen an awful lot of name-calling going in reverse
about Lisp in this conversation.

I suspect the root cause is that the forums are cross-posted, and both
the audiences and their preferences are different in the two groups.
It's probably reasonable for the functional language people not to
like Lisp, since presumably they self-identify as functional language
folks, and I don't think of Lisp as a functional language really.  It
has functional features, but it doesn't feature them centrally in the
way some other languages do.  But I don't like it when people invade
comp.lang.lisp to talk about what they don't like about lisp.  While
it's an open forum, I'd prefer to help those people who have committed
to Lisp and are having trouble, not to argue endlessly with people who
don't really care about Lisp and aren't committed to knowing.  I
assume they feel the same way about me criticizing functional
languages.  If I don't like them, I keep my comments to some other
forum, since obviously comp.lang.functional is for people who are
committed to using and advancing functional languages, or else I limit
my remarks to things I consider would be seen as constructive by that
forum.  Cross-posting just about guarantees everyone's remarks will be
taken out of context, and it's why I am relentlessly removing
comp.lang.functional from my posts on this matter--I have no desire to
invade their happy space with this discussion and wish anyone who
doesn't really care about Lisp would feel free not to invade our
space.

Certainly, of course, the middle ground of people who are working
seriously with Lisp and think it offers something important to them
beyond what other forums do and who are looking for ways to make best
advantage of that are not the people I am addressing here; my remarks
should not be construed as in any way saying Lisp is without
deficiency.  I just think that languages are local optima in a large
optimization space where if you drift a little ways from the defined
language, you are in no-man's land, and you may never regain your
hold.  Languages do not become better languages by drifting aimlessly
in the direction of other languages, they become better languages by
making quantized leaps in interesting directions, starting anew, and
trying to attract new audiences not by incrementally dismantling
another community, but by selling a new idea from scratch and seeing
if anyone comes to buy it.  Arc, has for example, tried to do that,
and with middling success.  I recently read about L#, for example,
which had somehow slipped by me before, and which seems to have some
relation to Arc.  There's another example of someone doing what I
regard as highly constructive: Starting something new and seeing where
it takes them.

This thing we're doing here of just randomly bashing one another as if
the languages we're respectively using are "the wrong languages" seems
utterly pointless at best and very socially destructive at worst.
It's destructive because it makes people feel attacked.  It's
destructive because it makes people enough personally angry that they
close their minds to ideas that on another day in another forum might
be quite interesting, because those ideas are not offered in a way
that invites interchange, they are offered in a way that says "defend
yourself or yield your right to have an opinion".  There is a line
between "peer review" and "peer pressure" and that line is, in my
book, drawn at the point where personal choice is replaced by social
pressure not to make a personal choice.

Pretty soon I'm going to have to just detach myself from this forum
and boycott it for a time.  I just can't stand much more of seeing the
forum hijacked this way and I really have better things to do with my 
time.
From: Christopher Browne
Subject: Re: tired of shootouts, cross-posting, and closed minds
Date: 
Message-ID: <60odhk93ul.fsf@dba2.int.libertyrms.com>
Kent M Pitman <······@nhplace.com> writes:
> Pretty soon I'm going to have to just detach myself from this forum
> and boycott it for a time.  I just can't stand much more of seeing the
> forum hijacked this way and I really have better things to do with my 
> time.

The trouble, notably with the "shootout" threads, is that the hijacker
(who I'll choose to leave nameless; more reasoning for that anon...) 
doesn't care (or lose anything) if you decide to leave.

It seems to me that marking the relevant threads as "killable" or
"uninteresting" or such is probably the better answer; if I drop out
the "shootout" thread, the amount of traffic falls to a more
interesting level.  That might be an answer...

I think it's worth watching the motivations out there; my suspicion is
that the group has attracted the attention of someone who "wins" every
time someone follows up to any thread he has participated in (and I
therefore decline to say anything more specifically descriptive).  It
has been suggested that the reason for him to be here is that he
wishes to sell books/magazines, and I think there is probably
considerable truth to this.

I was speculating that there may be the further reason that attracting
activity to his web site might draw people through "pay per click"
advertising, which is reasoning used for a number of sorts of things
these days, but a bit of a web browse suggests that he may be "just
about selling his stuff."

In any case it seems unfair for his actions to cause you to jump into
a "boycott," particularly as there is no reason for him to care about
any threats you may make.  Surely there's a better answer...
-- 
output = reverse("ofni.secnanifxunil" ·@" "enworbbc")
http://linuxdatabases.info/info/lsf.html
We are Pentium of Borg.  Division is futile. You will be approximated.
(seen in someone's .signature)
From: Kent M Pitman
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <utzri0ytc.fsf@nhplace.com>
[ comp.lang.lisp only 
  http://www.nhplace.com/kent/PFAQ/cross-posting.html ]

Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:

> On 2007-08-02 10:14:30 -0400, ·······@dino.dnsalias.com (Stephen
> J. Bevan) said:
> 
> > You can with any language, it is a matter of the cost:benefit ratio.
> > Lisp, along with other dynamic languages such Smalltalk, make *some* of
> > the costs negligible but has no impact on others.
> 
> I think you miss the point here: with a true dynamic language you can
> perform an upgrade on a running server, while it is still running,
> without bringing it down.

Including migrating already-instantiated data of a given class to
accommodate a changed class definition under normal, object-oriented
program control.
From: Dimiter "malkia" Stanev
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <46B2562F.2000804@gmail.com>
Well, for sure I would like to see that when doing, say game development.

Instead of constant restarts of the game, to the console (if that's a 
console game), one can extend features, classes, etc, until something 
desirable happens.

Right now my cycle (being a console game C++ programmer), is build 
(hopefully IncrediBuild helps it), deploy, restart, manual test, check 
to see whether it runs, and if it doesn't start again, until you find a 
solution.

And you don't have multiple servers running, you have only one image.

This type of coding (build, deploy, run, test, check) made to use global 
variables all over, so I can adjust values, until I get proper result, 
sometimes even more evil things (like directly patching in disassembly 
through the debugger certain things - like putting NOP here and there - 
I don't like that). All this to avoid restarts, I don't care how much I 
tweaked the system, I just want non-stoppable process where I can tweak, 
tweak, tweak and then after couple of hours, collect all the results, 
stop the "image", rewrite the code, and restart. With C++ right now that 
takes ages, not sure about OCAML, or other languages. With Lisp this 
seems like normal flow.

I ain't saying Lisp would be good, for my kind of business, but 
something like CINT/ROOT (A C++ Interpreter) might work for us.

Raffael Cavallaro wrote:
> On 2007-08-02 10:14:30 -0400, ·······@dino.dnsalias.com (Stephen J. 
> Bevan) said:
> 
>> You can with any language, it is a matter of the cost:benefit ratio.
>> Lisp, along with other dynamic languages such Smalltalk, make *some* of
>> the costs negligible but has no impact on others.
> 
> I think you miss the point here: with a true dynamic language you can 
> perform an upgrade on a running server, while it is still running, 
> without bringing it down.
> 
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-0049AC.00192003082007@news-europe.giganews.com>
In article <················@gmail.com>,
 "Dimiter \"malkia\" Stanev" <······@gmail.com> wrote:

> Well, for sure I would like to see that when doing, say game development.
> 
> Instead of constant restarts of the game, to the console (if that's a 
> console game), one can extend features, classes, etc, until something 
> desirable happens.
> 
> Right now my cycle (being a console game C++ programmer), is build 
> (hopefully IncrediBuild helps it), deploy, restart, manual test, check 
> to see whether it runs, and if it doesn't start again, until you find a 
> solution.
> 
> And you don't have multiple servers running, you have only one image.
> 
> This type of coding (build, deploy, run, test, check) made to use global 
> variables all over, so I can adjust values, until I get proper result, 
> sometimes even more evil things (like directly patching in dissasembly 
> through the debugger certain things - like putting NOP here and there - 
> I don't like that). All this to avoid restarts, I don't care how much I 
> tweaked the system, I just want non-stoppable process where I can tweak, 
> tweak, tweak and then after couple of hours, collect all the results, 
> stop the "image", rewrite the code, and restart. With C++ right now that 
> takes ages, not sure about OCAML, or other languages. With Lisp this 
> seems like normal flow.
> 
> I ain't saying Lisp would be good, for my kind of business, but 
> something like CINT/ROOT (A C++ Interpretter) might work for us.

You have read about GOAL?
http://en.wikipedia.org/wiki/Game_Oriented_Assembly_Lisp


> 
> Raffael Cavallaro wrote:
> > On 2007-08-02 10:14:30 -0400, ·······@dino.dnsalias.com (Stephen J. 
> > Bevan) said:
> > 
> >> You can with any language, it is a matter of the cost:benefit ratio.
> >> Lisp, along with other dynamic languages such Smalltalk, make *some* of
> >> the costs negligible but has no impact on others.
> > 
> > I think you miss the point here: with a true dynamic language you can 
> > perform an upgrade on a running server, while it is still running, 
> > without bringing it down.
> >

-- 
http://lispm.dyndns.org
From: Dimiter "malkia" Stanev
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46B26ED0.8060307@gmail.com>
> You have read about GOAL?
> http://en.wikipedia.org/wiki/Game_Oriented_Assembly_Lisp

Yup. That got me started learning Lisp. Unfortunately Sony decided that 
Naughty Dog should go without Lisp, so that their stuff can be reused by 
other studios. From business point that makes sense, but not from 
technological one. They were having really fast iterating cycles, and 
last I've heard, they were trying to come up with some form of 
interpreted C/C++, I guess something like CINT/ROOT, UnderC, or CH 
where they can mix interpreted with compiled C++ code.

For example the "hot-stuff" any programmer is working on (say AI, or 
sound), could be interpreted for him, but compiled for everyone not 
working on it, and then the rest of the system compiled too. And when 
comes to shipping/milestones, play testing, everything is compiled.

Now this is not necessarily a never-stopping web-server development. I 
would normally expect at least one restart a day (you have to turn those 
XBOX-es off at the end of the day), but at least during day development 
you should not be interrupted by any compile stuff.

For example one thing that saves the scripters/designers is that usually 
they do write in some form of dynamic language - Lua, JavaScript, 
AngelScript or something written internally. That gives them lots of 
freedom to experiment without restarting the level if the game allows 
it. It also makes the life easier for them, they do not have to care 
about types, or correctness, but whether the desired game effect is 
achieved. It's not about the right stuff, but whether it's good enough.

I myself came to the conclusion, that Lisp would be fine for that job, 
although viable only for the PC platforms (Mac, Linux, Windows), and I 
could write a scripting language on top of it, or make a Lua 
implementation on top of it (there already exist Python implementation 
for AllegroCL), which would give me as a programmer, the joy of lisp 
macros, and the scripter (s) who would work on the project, the joy of 
their language, they won't see lisp at all, they won't know it, much as 
they don't know that "C" or "C++" is behind them when they were using it.

In fact, my whole plan is to write a small kernel portion of the game in 
Lisp, and the rest to be in some form of "more fashionable" LUA 
scripting, and all would get compiled, with hopes to be faster than the 
standard Lua interpreter. Not sure, whether I can achieve it, or 
whether it's practical, but sounds good & exciting enough to work on it.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-152B74.02062403082007@news-europe.giganews.com>
In article <················@gmail.com>,
 "Dimiter \"malkia\" Stanev" <······@gmail.com> wrote:

> > You have read about GOAL?
> > http://en.wikipedia.org/wiki/Game_Oriented_Assembly_Lisp
> 
> Yup. That got me started learning Lisp. Unfortunately Sony decided that 
> Naughty Dog should go without Lisp, so that their stuff can be reused by 
> other studios. From business point that makes sense, but not from 
> technological one. They were having really fast iterating cycles, and 
> last I've heard, they were trying to come up with some form of 
> interpretted C/C++, I guess something like CINT/ROOT, UnderC, or CH 
> where they can mix interpretted with compiled C++ code.
> 
> For example the "hot-stuff" any programmer is working on (say AI, or 
> sound), could be interpretted for him, but compiled for everyone not 
> working on it, and then the rest of the system compiled too. And when 
> comes to shipping/milestones, play testing, everything is compiled.
> 
> Now this is not necessarily a never-stopping web-server development. I 
> would normally expect at least one restart a day (you have to turn those 
> XBOX-es off at the end of the day), but at least during day development 
> you should not be interrupted by any compile stuff.
> 
> For example one thing that saves the scripters/designers is that usually 
> they do write in some form of dynamic language - Lua, JavaScript, 
> AngelScript or something written internally. That gives them lots of 
> freedom to experiment without restarting the level if the game allows 
> it. It also makes the life easier for them, they do not have to care 
> about types, or correctness, but whether the desired game effect is 
> achieved. It's not about the right stuff, but whether it's good enough.
> 
> I myself came to the conclusion, that Lisp would be fine for that job, 
> although viable only for the PC platforms (Mac, Linux, Windows), and I 
> could write a scripting language on top of it, or make a Lua 
> implementation on top of it (there already exist Python implementation 
> for AllegroCL), which would give me as a programmer, the joy of lisp 
> macros, and the scripter (s) who would work on the project, the joy of 
> their language, they won't see lisp at all, they won't know it, much as 
> of they don't know that "C" or "C++" is behind them when they were using it.
> 
> In fact, my whole plan is to write a small kernel portion of the game in 
> Lisp, and the rest to be in some form of "more fashionable" LUA 
> scripting, and all would get compiled, with hopes to be faster than the 
> standard Lua interpretter. Not sure, whether I can achieve it, or 
> whether it's practical, but sounds good & exciting enough to work on it.

The original Dylan would have been a nice language for that.
A bit smaller and simpler than CL.
http://oop.rosweb.ru/dylan/book.annotated/contents.html

See also Sk8:
http://en.wikipedia.org/wiki/SK8
SK8 features:
    * A prototype-based, fully dynamic object system
    * An English-like scripting language supporting complex declarative operations on collections
    * A general and extensive graphics and event system based on containment and sophisticated rendering
    * A rich object framework with support for multimedia and for tool building, fully integrated with the operating system environment.
    * A feature-rich, direct manipulation development environment

All dead, but still worth a look for some inspiration. At least
for Sk8 there is source code.

-- 
http://lispm.dyndns.org
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186152808.536474.191070@l70g2000hse.googlegroups.com>
On Aug 3, 1:54 am, "Dimiter \"malkia\" Stanev" <······@gmail.com>
wrote:
> > You have read about GOAL?
> >http://en.wikipedia.org/wiki/Game_Oriented_Assembly_Lisp
>
> Yup. That got me started learning Lisp. Unfortunately Sony decided that
> Naughty Dog should go without Lisp, so that their stuff can be reused by
> other studios. From business point that makes sense, but not from
> technological one. They were having really fast iterating cycles, and
> last I've heard, they were trying to come up with some form of
> interpretted C/C++, I guess something like CINT/ROOT, UnderC, or CH
> where they can mix interpretted with compiled C++ code.
>
> For example the "hot-stuff" any programmer is working on (say AI, or
> sound), could be interpretted for him, but compiled for everyone not
> working on it, and then the rest of the system compiled too. And when
> comes to shipping/milestones, play testing, everything is compiled.
>
> Now this is not necessarily a never-stopping web-server development. I
> would normally expect at least one restart a day (you have to turn those
> XBOX-es off at the end of the day), but at least during day development
> you should not be interrupted by any compile stuff.
>
> For example one thing that saves the scripters/designers is that usually
> they do write in some form of dynamic language - Lua, JavaScript,
> AngelScript or something written internally. That gives them lots of
> freedom to experiment without restarting the level if the game allows
> it. It also makes the life easier for them, they do not have to care
> about types, or correctness, but whether the desired game effect is
> achieved. It's not about the right stuff, but whether it's good enough.
>
> I myself came to the conclusion, that Lisp would be fine for that job,
> although viable only for the PC platforms (Mac, Linux, Windows), and I
> could write a scripting language on top of it, or make a Lua
> implementation on top of it (there already exist Python implementation
> for AllegroCL), which would give me as a programmer, the joy of lisp
> macros, and the scripter (s) who would work on the project, the joy of
> their language, they won't see lisp at all, they won't know it, much as
> of they don't know that "C" or "C++" is behind them when they were using it.
>
> In fact, my whole plan is to write a small kernel portion of the game in
> Lisp, and the rest to be in some form of "more fashionable" LUA
> scripting, and all would get compiled, with hopes to be faster than the
> standard Lua interpretter. Not sure, whether I can achieve it, or
> whether it's practical, but sounds good & exciting enough to work on it.

If you're going only for a pc why don't you create FFI bindings for
the game engine below ? They won't take a lot of time especially if
you stay out from a c++ driven engines, but calling .Net engines or
those who already have  .net wrappers like Irrlicht from say RNDNZL
works just fine.
From: Dimiter "malkia" Stanev
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46B375CA.1010700@gmail.com>
> If you're going only for a pc why don't you create FFI bindings for
> the game engine below ? They won't take a lot of time especially if
> you stay out from a c++ driven engines, but calling .Net engines or
> those who already have  .net wrappers like Irrlicht from say RNDNZL
> works just fine.

FFI could be expensive, but it's the only option for hardware graphics 
acceleration, such as OpenGL or DirectX. For the rest of the stuff, with 
enough "static type" declarations (such as (simple-array 'single-float 
(4)) for packed single float vectors (SIMD/Altivec), and some lisp 
assembler tricks, or sbcl vops, or who knows what implementation 
specific feature, I'm thinking of getting SIMD working for me, directly 
through the lisp. It won't look much like lisp, more like assembly, but 
that's good for me, having lisp serving me as high-level assembler & 
it's macro language is good enough for me.
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186151678.803242.289880@19g2000hsx.googlegroups.com>
On Aug 3, 12:09 am, "Dimiter \"malkia\" Stanev" <······@gmail.com>
wrote:
> Well, for sure I would like to see that when doing, say game development.
>
> Instead of constant restarts of the game, to the console (if that's a
> console game), one can extend features, classes, etc, until something
> desirable happens.
>
> Right now my cycle (being a console game C++ programmer), is build
> (hopefully IncrediBuild helps it), deploy, restart, manual test, check
> to see whether it runs, and if it doesn't start again, until you find a
> solution.
>
> And you don't have multiple servers running, you have only one image.
>
> This type of coding (build, deploy, run, test, check) made to use global
> variables all over, so I can adjust values, until I get proper result,
> sometimes even more evil things (like directly patching in dissasembly
> through the debugger certain things - like putting NOP here and there -
> I don't like that). All this to avoid restarts, I don't care how much I
> tweaked the system, I just want non-stoppable process where I can tweak,
> tweak, tweak and then after couple of hours, collect all the results,
> stop the "image", rewrite the code, and restart. With C++ right now that
> takes ages, not sure about OCAML, or other languages. With Lisp this
> seems like normal flow.
>
> I ain't saying Lisp would be good, for my kind of business, but
> something like CINT/ROOT (A C++ Interpretter) might work for us.
>
You develop console games, great, any titles finished ?

I had a limited exposure with creating raycar with lisp, especially
tweaking the physics was a pain in the ass with C++, as changing a few
parameters and/or functions led to very different results, and waiting
for all the cruft to compile -- sometimes as long as half an hour as my
project advanced -- drove me crazy. Then I wrote a wrapper with lisp
controlling the physics and starting & stopping the game loop  from
the listener something like:

(defun game-loop ()
   (loop
     (if (esc-key-pressed return)
         (update-world))))

By changing the lisp classes and functions which control the code and
got immediate feedback, I managed to get decent drifting in a
matter of days, but the lisp raycar was around 30% slower measured
by framerate. I encountered a few lisp crashes after a few restarts
but it was far far better to develop with lisp than waiting for those
horrible builds and restarts.
From: Philippa Cowderoy
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <Pine.WNT.4.64.0708021624340.1772@sleek>
On Thu, 2 Aug 2007, Raffael Cavallaro wrote:

> On 2007-08-02 10:14:30 -0400, ·······@dino.dnsalias.com (Stephen J. Bevan)
> said:
> 
> > You can with any language, it is a matter of the cost:benefit ratio. 
> > Lisp, along with other dynamic languages such Smalltalk, make *some* 
> > of the costs negligible but has no impact on others.
> 
> I think you miss the point here: with a true dynamic language you can 
> perform an upgrade on a running server, while it is still running, 
> without bringing it down.
> 

Which doesn't avoid the issue that the upgrade could still cause problems. 
By the time this is all dealt with, the gain from not having to shut it 
down for the upgrade itself may well be pretty minuscule.

-- 
······@flippac.org

There is no magic bullet. There are, however, plenty of bullets that
magically home in on feet when not used in exactly the right circumstances.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <46b226c1$0$1600$ed2619ec@ptn-nntp-reader02.plus.net>
Raffael Cavallaro wrote:
> On 2007-08-02 10:14:30 -0400, ·······@dino.dnsalias.com (Stephen J.
> Bevan) said:
>> You can with any language, it is a matter of the cost:benefit ratio.
>> Lisp, along with other dynamic languages such Smalltalk, make *some* of
>> the costs negligible but has no impact on others.
> 
> I think you miss the point here: with a true dynamic language you can
> perform an upgrade on a running server, while it is still running,
> without bringing it down.

Sure, for some ill-specified notions of "dynamic
language", "upgrade", "server", "running" and "bringing it down". Overall,
not a terribly useful definition it has to be said...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <enodhpoann.fsf@hod.lan.m-e-leypold.de>
Jon Harrop wrote:

> Raffael Cavallaro wrote:
>> On 2007-08-02 10:14:30 -0400, ·······@dino.dnsalias.com (Stephen J.
>> Bevan) said:
>>> You can with any language, it is a matter of the cost:benefit ratio.
>>> Lisp, along with other dynamic languages such Smalltalk, make *some* of
>>> the costs negligible but has no impact on others.
>> 
>> I think you miss the point here: with a true dynamic language you can
>> perform an upgrade on a running server, while it is still running,
>> without bringing it down.
>
> Sure, for some ill-specified notions of "dynamic
> language", "upgrade", "server", "running" and "bringing it down". Overall,
> not a terribly useful definition it has to be said...

And considering that the host system is probably not a Lisp system
anyway, so for OS upgrades and other stuff at the same node we'd have
to have a failover scheme anyway: the situation where a dynamic
upgrade of this kind pays off strikes me as rather hypothetical. 

A counter example: Web servers. All service with client sessions of a
finite lifetime:

 (a) Have shared state in the database anyway. Synchronize two
     databases. Switch to the second database. (Most DBS already have
     mechanism in place for this kind of clustering and failover).

 (b) Now the database has already migrated. 

 (c) Now use DNS load balancing to direct all service requests to
     processes on the second server. Then wait until all sessions with
     the first server have closed.
     
 (d) You can now shut down the first server.


My opinion is, that good systems are planned for hot failover anyway,
regardless of the programming language, so you don't patch a running
server: You just switch to a server / virtual machine with the new
software version.

Or you do scheduled maintenance. (Hell -- that has been good enough
for eBay for the better part of a decade: Most people don't need
continuous service. And some that do don't need real-time latencies
and can be designed as queue-and-process systems. There are so many
ways to avoid that a system needs to be hot upgraded ....)

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b2596e$0$1594$ed2619ec@ptn-nntp-reader02.plus.net>
Markus E.L. 2 wrote:
> My opinion is, that good systems are planned for hot failover anyway,
> regardless of the programming language...

Exactly.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Ulf Wiger
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <xczir7x6ls6.fsf@cbe.ericsson.se>
>>>>> "M" == Markus E L 2 <·····································@ANDTHATm-e-leypold.de> writes:

  M> And considering that the host system is probably not a
  M> Lisp-System anyway so for OS upgrades and other stuff at the same
  M> node, we'd have to have a fall-over-scheme anyway: The situation
  M> where a dynamic upgrade of this kind pays of strikes me as rather
  M> hypothetical.
[...]

  M> My opinion is, that good systems are planned for hot failover
  M> anyway, regardless of the programming language, so you don't
  M> patch a running server: You just switch to a server / virtual
  M> machine with the new software version.

While I agree that using a failover scheme is the only way to 
cover all common SW upgrade scenarios, being able to patch a 
live system can be extremely useful. We use it extensively during
testing and debugging. Patching e.g. an instrumented module this
way takes only a couple of seconds, while a redundancy upgrade
(esp. in our systems) is quite an undertaking(*).

In Erlang, enabling call profiling is in fact done by reading the
parse tree from the compiled module, inserting instrumentation 
code, and then loading the instrumented module into the live 
system. Since no data is modified, the code can simply be 
hot-swapped, and no restarts of any kind are usually needed.

Using hot-loading of code is also extremely useful at interops,
where Erlang programmers often debug and correct errors without
even re-establishing the signaling links, much less restarting 
the system. To those with more traditional technology, this seems
more or less like magic.

(*) Unlike web servers, which have very little state to speak of,
our systems also control different kinds of (stateful) hardware,
like DSPs, network processors, etc. This means that failover in
the control plane has to be coupled with audit procedures, to 
avoid inconsistencies between control plane and data plane.
In these systems, doing a proper redundancy upgrade can easily 
take hours of preparation, whereas a knowledgeable designer could
make a small change to a single module, load it and fire off a few
commands in the interactive shell to test an hypothesis.

BR,
Ulf W
-- 
Ulf Wiger, Senior Specialist,
   / / /   Architecture & Design of Carrier-Class Software
  / / /    Team Leader, Software Characteristics
 / / /     Ericsson AB, IMS Gateways
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186155445.418194.270810@g12g2000prg.googlegroups.com>
On Aug 3, 1:54 am, Ulf Wiger <·······@cbe.ericsson.se> wrote:
> (*) Unlike web servers, which have very little state to speak of,

Wrong.  Certain transactions may have stateless semantics, but web
servers often have a huge amount of state, if only for performance
reasons.  For example, search engines must service almost all queries
without hitting the disk.
From: Ulf Wiger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <xcz3az07fsb.fsf@cbe.ericsson.se>
>>>>> "AF" == Andy Freeman <······@earthlink.net> writes:

  AF> On Aug 3, 1:54 am, Ulf Wiger <·······@cbe.ericsson.se> wrote:
  >>  (*) Unlike web servers, which have very little state to speak
  >>  of,

  AF> Wrong.  Certain transactions may have stateless semantics, but
  AF> web servers often have a huge amount of state, if only for
  AF> performance reasons.  For example, search engines must service
  AF> almost all queries without hitting the disk.

The context was upgrade through redundancy. Are you saying then 
that these web servers have replicated state in RAM? Which 
web servers are we talking about?

Just to be clear, my statement was relative to our products, which
must be able to handle failover without losing active sessions
(perhaps 30-50,000 of them per processor), where each session has
state in both control plane and data plane. Storing state on disk
is out of the question for performance reasons. In addition, there's
a wealth of statistical data as well as accounting data (which 
absolutely mustn't be lost.)

By comparison, I maintain that all web servers _I've encountered_
have very little state of relevance to hot-standby, but I could
be wrong of course. (:

BR,
Ulf W
-- 
Ulf Wiger, Senior Specialist,
   / / /   Architecture & Design of Carrier-Class Software
  / / /    Team Leader, Software Characteristics
 / / /     Ericsson AB, IMS Gateways
From: Stephen J. Bevan
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87odhoj8r5.fsf@dnsalias.com>
Ulf Wiger <·······@cbe.ericsson.se> writes:
> Just to be clear, my statement was relative to our products, which
> must be able to handle failover without losing active sessions
> (perhaps 30-50,000 of them per processor), where each session has
> state in both control plane and data plane.

Understood, but why does that imply ...

  This means that failover in the control plane has to be coupled with
  audit procedures, to avoid inconcistencies between control plane and
  data plane.  In these systems, doing a proper redundancy upgrade can
  easily take hours of preparation,

That is, what is the critical difference between your product and say
an IP firewall?  There you can update all the nodes in the cluster in
the time it takes to load the new image on all secondary nodes, reboot
and do an initial resync with the current primary (say 1 minute per
node), combined with time it takes to failover the primary to one of
the now updated secondaries (say < 10 seconds) followed by the time to
load&reboot the old primary (again say 1 minute) and then fail back.
That's all with traffic continuously running through the cluster and
with no session loss.  While I wouldn't do this on a whim, it doesn't
really require much in the way of planning.  You just type one command
on the primary to initiate the update and sit back and watch while it
coordinates the updates and only updates itself if the secondaries
were updated successfully.
From: Ulf Wiger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <xczy7gf5dfz.fsf@cbe.ericsson.se>
>>>>> "SJB" == Stephen J Bevan <·······@dino.dnsalias.com> writes:

  SJB> Ulf Wiger <·······@cbe.ericsson.se> writes:
  >>  Just to be clear, my statement was relative to our products,
  >>  which must be able to handle failover without losing active
  >>  sessions (perhaps 30-50,000 of them per processor), where each
  >>  session has state in both control plane and data plane.

  SJB> Understood, but why does that imply ...

  SJB> This means that failover in the control plane has to be coupled
  SJB> with audit procedures, to avoid inconcistencies between control
  SJB> plane and data plane.  In these systems, doing a proper
  SJB> redundancy upgrade can easily take hours of preparation,

  SJB> That is, what is the critical difference between your product
  SJB> and say an IP firewall?

Going out on a limb, I'd guess that one important difference is
complexity. While I'm not sure what you include when you speak
of an "IP firewall" (maybe you're referring to some fairly 
complex beast which comes with full provisioning support and 
can be placed in an unmanned facility and fully operated 
remotely? In that case, my observation may not be applicable).

Diving into google, I came across xanv.com, which boasts of
having full source for an IP firewall - 9 modules and more
than 50,000 lines of code. Our product line is made up of
a couple of million lines of code (excluding OS and middleware), 
and it does include a limited IP firewall. 50,000 lines of C++
is to us about the average size of a single component; the 
erlang-based "blocks" average ca 6 KLOC each.

Increased complexity has many ill effects: the size of the 
organization grows, and has to be complemented with all sorts
of supporting functions to keep track of everything that needs
to be done; requirements tend to get fuzzier, partly since you
get groups who deal with requirements full time, but can't 
really relate them to the actual code (which is written by
others); it also makes upgrades *much* more difficult to
manage, simply due to the sheer volume of stuff that needs 
to be considered.

BR,
Ulf W
-- 
Ulf Wiger, Senior Specialist,
   / / /   Architecture & Design of Carrier-Class Software
  / / /    Team Leader, Software Characteristics
 / / /     Ericsson AB, IMS Gateways
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <8h4pjciqzl.fsf@hod.lan.m-e-leypold.de>
Ulf Wiger wrote:

>>>>>> "M" == Markus E L 2 <·····································@ANDTHATm-e-leypold.de> writes:
>
>   M> And considering that the host system is probably not a
>   M> Lisp-System anyway so for OS upgrades and other stuff at the same
>   M> node, we'd have to have a fall-over-scheme anyway: The situation
>   M> where a dynamic upgrade of this kind pays of strikes me as rather
>   M> hypothetical.
> [...]
>
>   M> My opinion is, that good systems are planned for hot failover
>   M> anyway, regardless of the programming language, so you don't
>   M> patch a running server: You just switch to a server / virtual
>   M> machine with the new software version.
>


> While I agree that using a failover scheme is the only way to 
> cover all common SW upgrade scenarios, being able to patch a 
> live system can be extremely useful. We use it extensively during

BTW: Thanks for sharing that. Not that you think this is
unappreciated. 

Regards -- Markus
From: Tamas Papp
Subject: get a life (was Re: shootout: implementing an interpreter for a simple procedural   language Minim)
Date: 
Message-ID: <87643rnwil.fsf_-_@pu100877.student.princeton.edu>
·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) writes:

> Ulf Wiger wrote:
>
>>>>>>> "M" == Markus E L 2 <·····································@ANDTHATm-e-leypold.de> writes:
>>
> [snip]

When I opened my newsreader, I saw 40+ messages from you on c.l.l. in
this thread.  Get a life, stop spamming here.  Or at least kindly
refrain from posting to c.l.l. about topics not relevant to this
newsgroup.

Tamas
From: Markus E.L. 2
Subject: Re: get a life
Date: 
Message-ID: <plkcnxoq4.fsf@hod.lan.m-e-leypold.de>
Tamas Papp wrote:

> ·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) writes:
>
>> Ulf Wiger wrote:
>>
>>>>>>>> "M" == Markus E L 2 <·····································@ANDTHATm-e-leypold.de> writes:
>>>
>> [snip]
>
> When I opened my newsreader, I saw 40+ messages from you on c.l.l. in
> this thread.  Get a life, stop spamming here.  Or at least kindly
> refrain from posting to c.l.l. about topics not relevant to this
> newsgroup.

Ah, yes, the old "you're a spammer -- my news reader can't handle
that" argument. You might notice: I'm not the only one writing this
much about the very same topics on c.l.l. What I write are exclusively
answers to articles and answers to articles that have been posted to
c.l.l. at that.

I put it to everyone's consideration (especially the contributors from
c.l.l) to answer to c.l.f only, but please grant me the right to
answer on both groups to posts that have been crossposted to both
groups.

I also notice that you didn't try to call Rainer, Don and Rayiner
spammers.

Regards -- Markus


PS: You can down rate me at Google Groups. I recently heard that's
    what decides whether someone is a troll/spammer.
From: Duane Rettig
Subject: Re: get a life
Date: 
Message-ID: <o0myx38cjx.fsf@gemini.franz.com>
·····································@ANDTHATm-e-leypold.de (Markus
E.L. 2) writes:

> Tamas Papp wrote:
>
>> ·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) writes:
>>
>>> Ulf Wiger wrote:
>>>
>>>>>>>>> "M" == Markus E L 2 <·····································@ANDTHATm-e-leypold.de> writes:
>>>>
>>> [snip]
>>
>> When I opened my newsreader, I saw 40+ messages from you on c.l.l. in
>> this thread.  Get a life, stop spamming here.  Or at least kindly
>> refrain from posting to c.l.l. about topics not relevant to this
>> newsgroup.
>
> Ah, yes, the old "you're a spammer -- my news reader can't handle
> that" argument. You might notice: I'm not the only one writing this
> much about the very same topics on c.l.l. What I write are exclusively
> answers to articles and answers to articles that have been posted to
> c.l.l. at that.

I've never considered you a spammer, but I also had a similar reaction
this morning (I was wondering where you found the time to answer every
article on the thread, and was wondering whether you had lost your job
or something :-)  But then, as I read a couple of your replies that
implied that you had no time for an answer, followed by _more_
replies, I realized you appear to be caught in a typical trap that
boils down to an "addiction to answering" (my coinage, as far as I
know).  This means that you can't let an article go that you disagree
with without answering, even if you've already answered an almost
identical article before; I've seen several of these replies from you
where the only thing you say is "I've answered this elsewhere" without
adding any information to the article. Perhaps this appearance is not
accurate, but it is the perception that I have.  It is really a
question of asking yourself "Can I let this one go?" - if the answer
is "no" most of the time, then you have the addiction.  And the only
reason I'm writing to you here is to warn you (and any others who
might have the same addiction) that such an addiction is potentially
harmful to your health; there have been others on newsgroups who have
had health problems because of it.

> I put it to everones consideration (especially the contributors from
> c.l.l) to answer to c.l.f only, but please grant me the right to
> answer on bothe groups to posts that have been crossposted to both
> groups.

But that cuts both ways.  Most people who feel they are answering
authoritatively (note that I include myself in this category, since I
am also cross-posting this, though I don't usually do so) will also
continue the cross-post.  The only way to stop the cross-posting is to
take the initiative yourself to redirect or to not answer at all.

> I also notice that you didn't try to call Rainer, Don and Rayiner
> spammers.

Again, I don't consider you a spammer, but the difference in
perception between them and you is because they are not answering
every post, as far as I perceive - they seem to pick their
battlegrounds more carefully.

> Regards -- Markus
>
>
> PS: You can down rate me at Google Groups. I recently heard that's
>     what decides wether someone is a troll/spammer.

I don't think that is necessary; your physical health, or your job (or
classes, if you are a student) will eventually cause an auto-regulation
of your posts.  I encourage you to consider developing a
self-regulating style, though, so that other aspects of your life
don't suffer.

Newsgroups: Sorry for the cross-post that is off-topic to both of
these NGs; it is at least not off-topic to the subject line...

-- 
Duane Rettig    ·····@franz.com    Franz Inc.  http://www.franz.com/
555 12th St., Suite 1450               http://www.555citycenter.com/
Oakland, Ca. 94607        Phone: (510) 452-2000; Fax: (510) 452-0182   
From: Markus E.L. 2
Subject: Re: get a life
Date: 
Message-ID: <rrtzrbfbg5.fsf@hod.lan.m-e-leypold.de>
Duane Rettig wrote:

> ·····································@ANDTHATm-e-leypold.de (Markus
> E.L. 2) writes:
>
>> Tamas Papp wrote:
>>
>>> ·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) writes:
>>>
>>>> Ulf Wiger wrote:
>>>>
>>>>>>>>>> "M" == Markus E L 2 <·····································@ANDTHATm-e-leypold.de> writes:
>>>>>
>>>> [snip]
>>>
>>> When I opened my newsreader, I saw 40+ messages from you on c.l.l. in
>>> this thread.  Get a life, stop spamming here.  Or at least kindly
>>> refrain from posting to c.l.l. about topics not relevant to this
>>> newsgroup.
>>
>> Ah, yes, the old "you're a spammer -- my news reader can't handle
>> that" argument. You might notice: I'm not the only one writing this
>> much about the very same topics on c.l.l. What I write are exclusively
>> answers to articles and answers to articles that have been posted to
>> c.l.l. at that.
>
> I've never considered you a spammer, 

Thanks.

> but I also had a similar reaction this morning (I was wondering
> where you found the time to answer every article on the thread, and

Yes, so did I. Actually my initial intention was (yesterday and today)
only to refute one misconception once and never again. After counting
my own posts I can soothe the people on c.l.l: It's not my intention
to continue. Some non-arguments crop up again and again all over the
place and I already said that I don't consider it my duty to hold
tutorials on stuff I think other people are sadly missing from their
repertoire (mind you: It's 2 different things whether one knows how
something works and whether he or she then actually does decide to
apply it: A number of participants in this threads have been making
replies that indicate to me that they lack in the former department
and I originally thought I'd try to put that into perspective. Now I
see I can't stem it, so I won't try: Please people, don't construe my
(hopeful) future lack of replies on most of this unfortunate not as
acquiescence but rather as having given up to supply what some
curricula probably have failed to address).

> was wondering whether you had lost your job or something :-)

I'm between projects and quit office early this day :-).


> But then, as I read a couple of your replies that
> implied that you had no time for an answer,

> followed by _more_
> replies, I realized you appear to be caught in a typical trap that
> boils down to an "addiction to answering" (my coinage, as far as I
> know).  

There is some truth in that. Seeing the same misconceptions again and
again and suspecting that one's reply was not clearly enough formulated
one tries again. Actually a disease perhaps non-native speakers are
more prone to, since we have to live with the suspicion that some of
our message doesn't make it beyond the language barrier.

> This means that you can't let an article go that you disagree
> with without answering, 

Not quite. I can't let articles go that are factually wrong (like that
about the semantics of badly typed languages). You'll find that I'm
quite accommodating on the subject of opinions.

> even if you've already answered an almost
> identical article before; I've seen several of these replies from you
> where the only thing you say is "I've answered this elsewhere" without
> adding any information to the article. 

Right. This was to refer from the sub thread to the examples I only
wanted to post once. It also indicates that in some sub threads the
same false statements cropped up multiple times -- often by the same
people, so something that doesn't come from me alone (indeed the
references to something I already replied can be construed as an
attempt to re-unify different sub-threads that ended at very similar
points).

> Perhaps this appearence is not
> accutrate, 

No, not quite. :-) I've counted myself to how much of the replies
addressed to me I replied myself and the rate is not so high :-).

> but it is the perception that I have.  

> It is really a question of asking yourself "Can I let this one go?"

You know, I think I can. Until before yesterday I thought it was
actually interesting and leading somewhere.  Yesterday I noticed the
repetitive insistence of some participants on the same wrong
propositions all over again and again to an extent that made me
actually doubt in some cases whether they had a degree in CS at all
(they have, never mind).


> - if the answer is "no" most of the time, then you have the
> addiction.

It's 'yes' most of the time :-).

> And the only
> reason I'm writing to you here is to warn you (and any others who
> might have the same addiction) that such an addiction is potentially
> harmful to your health; there have been others on newsgroups who have
> had health problems because of it.

Yeah, yeah. No offense taken, though. You'll find that if I got the
time, I'll often answer quite frequently and then drop from usenet
almost completely for a very long time.

>> I put it to everones consideration (especially the contributors from
>> c.l.l) to answer to c.l.f only, but please grant me the right to
>> answer on bothe groups to posts that have been crossposted to both
>> groups.

> But that cuts both ways.  Most people who feel they are answering
> autoritatively (note that I include myself in this category, since I
> am also corss-posting this, though I don't usually do so) will also
> continue the cross-post.  

That's the curse of threads that begin as crosspostings,
unfortunately. But you cannot expect (this is a question of
psychology) that one gets a wrong and sometimes insulting answer in
both groups and then only answers in one of them -- letting the answer
stand as if one yielded there.

Actually I've been moving some of the less controversial subthreads to
c.l.f completely (those where I didn't answer to a "but you're wrong
and deluded" statement).

> The only way to stop the cross-posting is to take the initiative
> yourself to redirect or to not answer at all.

:-). Exactly what I say. _I_ didn't have a problem with crossposting
and I'd ask you to note that it is c.l.l. readers that keep the
thread alive in c.l.l (if they didn't it would either have died or
gone to c.l.f completely: Indeed a good part of my answers to people
which I know to be reading c.l.f (like Thant and Jo) have been
directed to c.l.f exclusively.

>
>> I also notice that you didn't try to call Rainer, Don and Rayiner
>> spammers.
>
> Again, I don't consider you a spammer, but the difference in

Yep. But then you didn't call them

> perception between them and you is because they are not answering
> every post, as far as I perceive.

Nor do I.

> they seem to pick their battlegrounds more carefully.

No. They are just 3 or 4 people where I'm answering 3-4 people. I
think Rayiner (e.g.) seldom dropped a thread.



>> PS: You can down rate me at Google Groups. I recently heard that's
>>     what decides wether someone is a troll/spammer.
>
> I don't think that is necessary; 

Ah. That actually was intended to be irony referring back to the "Jon
is a Spammer" discussion and some of the arguments I heard then.

> your physical health, or your job (or

You know: I type very fast: It does take me only a really small part
of my time to write this many posts :-).

> classes, if you are a student) will eventually cause an auto-regulation
> of your posts.  

> I encourage you to consider developing a
> self-regulating style, though, so that other asppects of your life
> don't suffer.

Let that be my worry, right? As some people like to go for an evening
of heavy drinking (also not healthy :-) I sometimes like to go for an
evening of mailing and posting to usenet.

Regards -- Markus

(Last post in this thread: If you answer and it doesn't regard
netiquette or the news groups in general or is onT, please do by
personal mail.)
From: Ken Tilton
Subject: Re: get a life
Date: 
Message-ID: <F56ui.55$Fn.35@newsfe12.lga>
Markus E.L. 2 wrote:
> Duane Rettig wrote:
> 
> 
>>This means that you can't let an article go that you disagree
>>with without answering, 
> 
> 
> Not quite.

:)

kt
From: Thomas F. Burdick
Subject: Re: get a life
Date: 
Message-ID: <1186513694.983028.198520@k79g2000hse.googlegroups.com>
On Aug 7, 7:02 pm, ·····································@ANDTHATm-e-
leypold.de (Markus E.L. 2) wrote:
> Tamas Papp wrote:
> > ·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) writes:
>
> >> Ulf Wiger wrote:
>
> >>>>>>>> "M" == Markus E L 2 <·····································@ANDTHATm-e-leypold.de> writes:
>
> >> [snip]
>
> > When I opened my newsreader, I saw 40+ messages from you on c.l.l. in
> > this thread.  Get a life, stop spamming here.  Or at least kindly
> > refrain from posting to c.l.l. about topics not relevant to this
> > newsgroup.
>
> Ah, yes, the old "you're a spammer -- my news reader can't handle
> that" argument.

A spammer?  No, a spammer you are not.  A blow-hard jackass loser who
spends all his time on usenet filling up newsgroups with irrelevant
crap to the point where said jackass actually questions the puissance
of others' *newsreaders* in response to his fecal onslaught ... that I
leave as an exercise to the reader.
From: Garry Hodgson
Subject: Re: get a life (was Re: shootout: implementing an interpreter for a	simple procedural language Minim)
Date: 
Message-ID: <2007081220301186965000@k2.sage.att.com>
Tamas Papp <······@gmail.com> wrote:

> When I opened my newsreader, I saw 40+ messages from you on c.l.l. in
> this thread.  Get a life, stop spamming here.  Or at least kindly
> refrain from posting to c.l.l. about topics not relevant to this
> newsgroup.

i hope you've made the same request of the lispers spamming
comp.lang.functional.  i agree that this thread(s) desperately needs
to go away, but markus is not the only player.

----
Garry Hodgson, Senior Software Geek, AT&T CSO

nobody can do everything, but everybody can do something.
do something.
From: Tamas Papp
Subject: Re: get a life (was Re: shootout: implementing an interpreter for a simple procedural language Minim)
Date: 
Message-ID: <87tzr37uay.fsf@pu100877.student.princeton.edu>
Garry Hodgson <·····@sage.att.com> writes:

> Tamas Papp <······@gmail.com> wrote:
>
>> When I opened my newsreader, I saw 40+ messages from you on c.l.l. in
>> this thread.  Get a life, stop spamming here.  Or at least kindly
>> refrain from posting to c.l.l. about topics not relevant to this
>> newsgroup.
>
> i hope you've made the same request of the lispers spamming
> comp.lang.functional.  i agree that this thread(s) desperately needs
> to go away, but markus is not the only player.

I don't read c.l.f, so I leave this to those who do.

Tamas
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7xzm1394qh.fsf@ruckus.brouhaha.com>
Ulf Wiger <·······@cbe.ericsson.se> writes:
> While I agree that using a failover scheme is the only way to 
> cover all common SW upgrade scenarios, being able to patch a 
> live system can be extremely useful. We use it extensively during
> testing and debugging. Patching e.g. an instrumented module this
> way takes only a couple of seconds, while a redundancy upgrade
> (esp. in our systems) is quite an undertaking(*)....
> Using hot-loading of code is also extremely useful at interops,
> where Erlang programmers often debug and correct errors without
> even re-establishing the signaling links, much less restarting 
> the system. To those with more traditional technology, this seems
> more or less like magic.

This does sound useful, but not especially reliant on dynamic types.
gdb injects some simple code into running C programs to allow the user
to call program functions from the debugger command line.  And of course
Java can dynamically load class files and fully type check them.
From: Ulf Wiger
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <xcztzr35co0.fsf@cbe.ericsson.se>
>>>>> "PR" == Paul Rubin <·············@NOSPAM.invalid> writes:

  PR> Ulf Wiger <·······@cbe.ericsson.se> writes:
  >>  While I agree that using a failover scheme is the only way to
  >>  cover all common SW upgrade scenarios, being able to patch a
  >>  live system can be extremely useful. We use it extensively
  >>  during testing and debugging. Patching e.g. an instrumented
  >>  module this way takes only a couple of seconds, while a
  >>  redundancy upgrade (esp. in our systems) is quite an
  >>  undertaking(*)....  Using hot-loading of code is also extremely
  >>  useful at interops, where Erlang programmers often debug and
  >>  correct errors without even re-establishing the signaling links,
  >>  much less restarting the system. To those with more traditional
  >>  technology, this seems more or less like magic.

  PR> This does sound useful, but not especially reliant on dynamic
  PR> types.  gdb injects some simple code into running C programs to
  PR> allow the user to call program functions from the debugger
  PR> command line.  And of course Java can dynamically load class
  PR> files and fully type check them.

I specifically left out any claims that this would be an argument
for dynamic types. My tool is Erlang, it has excellent support 
for upgrading on the fly; it is dynamically typed. The latter
two statements are not necessarily tightly connected. While I
would suspect that some aspects of dynamic code loading are 
certainly easier with dynamic typing, I can't claim that static
typing makes it undoable.

I simply wanted to stress how useful hot code loading is. It isn't
just a cop-out for those who are too lazy to use proper types to
begin with. (:

BTW, while Java supposedly supports code loading on the fly, it 
does seem to stop well short of supporting live upgrade of complex
software, at least according to the accounts I've heard.

BR,
Ulf W
-- 
Ulf Wiger, Senior Specialist,
   / / /   Architecture & Design of Carrier-Class Software
  / / /    Team Leader, Software Characteristics
 / / /     Ericsson AB, IMS Gateways
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8tp7i$k4s$3@online.de>
Raffael Cavallaro schrieb:
> On 2007-08-02 10:14:30 -0400, ·······@dino.dnsalias.com (Stephen J. 
> Bevan) said:
> 
>> You can with any language, it is a matter of the cost:benefit ratio.
>> Lisp, along with other dynamic languages such Smalltalk, make *some* of
>> the costs negligible but has no impact on others.
> 
> I think you miss the point here: with a true dynamic language you can 
> perform an upgrade on a running server, while it is still running, 
> without bringing it down.

Then Alice ML is a true dynamic language.
(Despite being statically typed...)

Regards,
Jo
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <2007080221210322503-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-08-02 19:27:07 -0400, Joachim Durchholz <··@durchholz.org> said:

> Then Alice ML is a true dynamic language.
> (Despite being statically typed...)

Not familiar with Alice ML - can you do this as well?:

On 2007-08-02 10:57:03 -0400, Kent M Pitman <······@nhplace.com> said:

> Including migrating already-instantiated data of a given class to
> accommodate a changed class definition under normal, object-oriented
> program control.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f9352j$9l9$2@online.de>
Raffael Cavallaro schrieb:
> On 2007-08-02 19:27:07 -0400, Joachim Durchholz <··@durchholz.org> said:
> 
>> Then Alice ML is a true dynamic language.
>> (Despite being statically typed...)
> 
> Not familiar with Alice ML - can you do this as well?:
> 
> On 2007-08-02 10:57:03 -0400, Kent M Pitman <······@nhplace.com> said:
> 
>> Including migrating already-instantiated data of a given class to
>> accommodate a changed class definition under normal, object-oriented
>> program control.

Don't know.

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b2699a$0$1611$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> Then Alice ML is a true dynamic language.
> (Despite being statically typed...)

Like F# and most other static languages...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Stephen J. Bevan
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <87fy31l38h.fsf@dnsalias.com>
Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> On 2007-08-02 10:14:30 -0400, ·······@dino.dnsalias.com (Stephen
> J. Bevan) said:
>
>> You can with any language, it is a matter of the cost:benefit ratio.
>> Lisp, along with other dynamic languages such Smalltalk, make *some* of
>> the costs negligible but has no impact on others.
>
> I think you miss the point here: with a true dynamic language you can
> perform an upgrade on a running server, while it is still running,
> without bringing it down.

You can do that with any language, I've done it with servers written
in C.  The critical difference isn't whether it can be done or not but
is how easy it is to do it[1].  However, ease is only the dominating
issue if one decides that the live updates approach is actually a cost
effective solution.  Your argument seems to be predicated on that.  My
previous emails were an attempt to explain that even if you can do
live updates, it isn't necessarily the most cost effective solution.
The cost here is not the measure of how easy/difficult it is to write
a live update due to the language choice, rather the cost is the
cost of making a live update which ends up having negative impact on
the system and having to fix the damage.  If this cost is negligible
then sure, go for the live update if it fails patch it again until it
works.  However, if the cost is going to be measured in $$$ for every
minute the server is broken then you have to be damn sure the live
update is good and/or you have a strategy for instantly reverting it
(not writing another live update that's too late, it has to be a
revert).  Consequently if $$$ is involved or I'm going to get the call
if the system breaks I'd avoid live updates in the style you are
advocating and go with the multiple servers with failover, it has a
much better worst case behaviour (and would avoid a call at 3AM :-)

---------------------------

[1] In C you need to plan ahead and call some functions via function
    pointers to allow patching when you dlopen the .so containing the
    revised code.  Whether you need to do something equivalent in Lisp
    depends on your environment e.g. if you assume you have a full
    compiler available (not a good idea on a firewall, not enough
     space :-)  then you don't need to plan ahead and you have a
    "truly dynamic language" (well implementation really).  If you
    can't make that assumption then you'll need to call some functions
    via symbols (or locations, or whatever the most appropriate
    mechanism is to get the necessary patching ability).  In Erlang,
    if you plan to make use of live update you'll need to plan ahead
    and have some function/method calls use a slightly different
    notation which will allow that call to jump to the new function
    when it is loaded.  Perhaps that means Erlang, like C, is not a
    "truly dynamic language".
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <2007080316092782327-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-08-02 23:13:35 -0400, ·······@dino.dnsalias.com (Stephen J. 
Bevan) said:

> You can do that with any language, I've done it with servers written
> in C.  The critical difference isn't whether it can be done or not but
> is how easy it is to do it[1].

I think the point is that in a language like c, to have the equivalent 
of update-instance-for-redefined-class called automatically by the 
runtime, you essentially have to greenspun a whole lisp/clos in c.
From: Stephen J. Bevan
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <87vebwj9to.fsf@dnsalias.com>
Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> On 2007-08-02 23:13:35 -0400, ·······@dino.dnsalias.com (Stephen
> J. Bevan) said:
>
>> You can do that with any language, I've done it with servers written
>> in C.  The critical difference isn't whether it can be done or not but
>> is how easy it is to do it[1].
>
> I think the point is that in a language like c, to have the equivalent
> of update-instance-for-redefined-class called automatically by the
> runtime, you essentially have to greenspun a whole lisp/clos in c.

I think the point is a conjunction of :-

  1. live updating is a good solution to problem X.
  2. Lisp makes live updating simpler than non-dynamic languages for
     problem X.

The truth of 2 is only of academic interest to me unless one can make
a case for 1.  I think I have good reasons why 1 is not true for some
values of X that I care about and thus 2 doesn't apply.  Other than
some Erlang[1] examples I haven't seen much in the way of live updates
examples of 1 in this thread.  The closest I come to live update on a
regular basis is loading new code into Emacs or loading a module into
the Linux kernel.  The former doesn't make use of any kind of
update-instance-for-redefined-class despite being Lisp and the latter
is only updating a well-defined set of things and so has no need of it
and thus does not come anywhere near Greenspun.  If there are live
update examples out there that make use of
update-instance-for-redefined-class, let's hear about them.

---------------------

[1] Which doesn't have an equivalent of
    update-instance-for-redefined-class and so is no help in making a
    case for 2.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186198456.319183.32880@k79g2000hse.googlegroups.com>
> The truth of 2 is only of academic interest to me unless one can make
> a case for 1.  

I'm not directly familiar with any Lisp software that uses this
feature (though I'm sure its used), but live patching is an important
feature for some markets. For example, live patching (of processes and
the kernel) is being added to Linux to meet the requirements for the
Carrier Grade Linux specification. In C, live patching can't upgrade
existing data, but this is just a limitation of the language, not an
implication that the feature is not useful. Live patches have to be
carefully planned, and obviously its easier to make the process more
robust if you can modify the data format along with the code.
From: Stephen J. Bevan
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <877iobjql6.fsf@dnsalias.com>
Rayiner Hashem <·······@gmail.com> writes:
>> The truth of 2 is only of academic interest to me unless one can make
>> a case for 1.  
>
> I'm not directly familiar with any Lisp software that uses this
> feature (though I'm sure its used), but live patching is an important
> feature for some markets. For example, live patching (of processes and
> the kernel) is being added to Linux to meet the requirements for the
> Carrier Grade Linux specification.

Can you point at a Linux system that satisfies the Carrier Grade Linux
requirements with respect to live patching of the kernel and is
actually deployed?  That is, the fact that a group writes a document
saying something is important is moot unless someone follows through
with an implementation that satisfies the spec.


> In C, live patching can't upgrade existing data,

I assume you mean "upgrade in place" since it is clearly possible to
copy the data to a new structure.  For example you can live update a
firewall session table written in C by marking the old table as dirty
and then each time an entry is used in the old table, it is
copied&upgraded to the new session table.  Eventually all the old
sessions will be upgraded or they time out at which point the old
table can be removed along with the code that supported it.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186240976.508893.6480@i13g2000prf.googlegroups.com>
> Can point at a Linux system that satisfies the Carrier Grade Linux
> requirements with respect to live patching of the kernel and is
> actually deployed?  That is, the fact that a group writes a document
> saying something is important is moot unless someone follows through
> with an implementation that satisfies the spec.

First, it's not just any group, it's OSDL. Second, there are multiple
vendors who support CGL 3.2+ (the version that included the live
patching requirement). Montavista's Carrier Grade Linux supports CGL
4.0, and WindRiver's Carrier Grade Linux supports CGL 3.2. WindRiver's
CGL distro will be launched into space in 2009.

> I assume you mean "upgrade in place" since it is clearly possible to
> copy the data to a new structure.  For example you can live update a
> firewall session table written in C by

That's not a general solution, since copying the data to a new
structure invalidates existing pointers to it.
From: Stephen J. Bevan
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87y7gqiude.fsf@dnsalias.com>
Rayiner Hashem <·······@gmail.com> writes:
>> Can point at a Linux system that satisfies the Carrier Grade Linux
>> requirements with respect to live patching of the kernel and is
>> actually deployed?  That is, the fact that a group writes a document
>> saying something is important is moot unless someone follows through
>> with an implementation that satisfies the spec.
>
> First, it's not just any group, it's OSDL.

First, OSDL is just a group, their pronouncements are not imbued with
any special status.

> Second, there are multiple
> vendors who support CGL 3.2+ (the version that included the live
> patching requirement). Montavista's Carrier Grade Linux supports CGL
> 4.0, and WindRiver's Carrier Grade Linux supports CGL 3.2.

Second, neither company has publically registered for CGL 4.0 which
includes AVL 27.0 (Kernel Live Patching) and via Google I can't find
anything which says either company supports it.  I can find their CGL
3.2 registrations[1,2] and they omit support for a number of AVLs
while still claiming to support CGL 3.2.  Thus while I have no
evidence that either company does not support AVL 27.0 I do have
evidence that they "supported" 3.2 without actually supporting all the
AVLs.  So, even if they claim support for CGL 3.2+ that doesn't to me
imply that they actually support AVL 27.0.  If they do claim support
for AVL 27.0 (where?) then I'd dearly love to know how, for example,
they support me live updating the kernel with a (four line) fix for a
routing cache entry leak in the IPv4 stack while also ensuring that
if/when the next reboot occurs that patch will be in the kernel that
boots.  Not that I'm saying it is impossible, only that I don't think
it is cost effective.  Of course if the customer is willing to pay for
it, then sure why not.  However, the "if" in the last sentence is
important.  The existence of AVL 27.0 only implies that someone
wants it, not that they are willing to pay the asking price for it.


>> I assume you mean "upgrade in place" since it is clearly possible to
>> copy the data to a new structure.  For example you can live update a
>> firewall session table written in C by
>
> That's not a general solution, since copying the data to a new
> structure invalidates existing pointers to it.

Of course it is not a general solution, but if it is good enough for
the (paying) customer that's all that matters.

---------------------

[1] Windriver CGL 3.2 registration 
    http://www.windriver.com/products/platforms/wrlinux13-cgl-registration/wrlinux-1_3-CGL-3_2-registration-document.pdf

[2] http://www.mvista.com/products/cge/cgl/availability.php
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186282054.817072.15350@k79g2000hse.googlegroups.com>
> Second, neither company has publically registered for CGL 4.0 which
> includes AVL 27.0 (Kernel Live Patching) and via Google I can't find
> anything which says either company supports it.

Sorry, Montavista supports CGL 3.2. Their registration noting support
for live-patching is here: http://www.mvista.com/products/cge/cgl/availability.php

--- from registration ----

AVL.10.0 - Live Patching

Requirement ID                 : AVL.10.0
Requirement Name               : Live Patching
Requirement Category           : Availability
Required by OSDL CGL 3.2       : Yes
Supported by MontaVista CGE4.0 : Yes
Proof of Concept               : RPM
RPM                            : fsad-1.1.3
Comment                        :

Name        : fsad                         Relocations: /opt/
montavista/cge/devkit/x86/pentium3/target
Version     : 1.1.3                             Vendor: MontaVista
Software, Inc.
Release     : 3.2.9.0600940                 Build Date: Thu Jun 29
17:07:28 2006
Install Date: Sun Jul  2 05:09:16 2006      Build Host:
node-31.borg.mvista.com
Group       : base                          Source RPM:
fsad-1.1.3-3.2.9.0600940.src.rpm
Size        : 236781                           License: MontaVista
Proprietary
Signature   : (none)
Packager    :
Summary     : Field Safe Application Debugger and Runtime Patcher
Description :
FSAD provides a debugger that can be linked in to the application and
can
be run on a live system in the field with minimal impact to the
application.
This way, field problems can be debugged.  It also has a way to patch
the
application while it is running, so that most bugs can be repaired
without
stopping the application.

--- end snip ---
From: Stephen J. Bevan
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87lkcqirzl.fsf@dnsalias.com>
Rayiner Hashem <·······@gmail.com> writes:

>> Second, neither company has publically registered for CGL 4.0 which
>> includes AVL 27.0 (Kernel Live Patching) and via Google I can't find
>> anything which says either company supports it.
>
> Sorry, Montavista supports CGL 3.2. Their registration noting support
> for live-patching is here: http://www.mvista.com/products/cge/cgl/availability.php
>

I know Montavista supports AVL 10.0 (live patching of user-level
programs) but patching user-level processes is not what the (sub)
thread is about.  In an earlier message you wrote :-

  For example, live patching (of processes and
  the kernel) is being added to Linux to meet the requirements for the
  Carrier Grade Linux specification.

I followed up asking :-

  Can point at a Linux system that satisfies the Carrier Grade Linux
  requirements with respect to live patching of the kernel and is
  actually deployed?

I specifically only mention the kernel and so AVL 10.0 is
irrelevant. It is support for AVL 27.0 that is required.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <echcne619p.fsf@hod.lan.m-e-leypold.de>
'stephen AT dino DOT dnsalias DOT com (Stephen J DOT Bevan)' wrote:

> Rayiner Hashem <·······@gmail.com> writes:
>>> Can point at a Linux system that satisfies the Carrier Grade Linux
>>> requirements with respect to live patching of the kernel and is
>>> actually deployed?  That is, the fact that a group writes a document
>>> saying something is important is moot unless someone follows through
>>> with an implementation that satisfies the spec.
>>
>> First, it's not just any group, it's OSDL.
>
> First, OSDL is just a group, their pronouncments are not imbuded with
> any special status.
>
>> Second, there are multiple
>> vendors who support CGL 3.2+ (the version that included the live
>> patching requirement). Montavista's Carrier Grade Linux supports CGL
>> 4.0, and WindRiver's Carrier Grade Linux supports CGL 3.2.
>
> Second, neither company has publically registered for CGL 4.0 which
> includes AVL 27.0 (Kernel Live Patching) and via Google I can't find
> anything which says either company supports it.  I can find their CGL
> 3.2 registrations[1,2] and they omit support for a number of AVLs
> while still claiming to support CGL 3.2.  Thus while I have no
> evidence that either company does not support AVL 27.0 I do have
> evidence that they "supported" 3.2 without actually supporting all the
> AVLs.  So, even if they claim support for GCL 3.2+ that doesn't to me
> imply that they actually support AVL 27.0.  If they do claim support
> for AVL 27.0 (where?) then I'd dearly love to know how, for example,
> they support me live updating the kernel with a (four line) fix for a
> routing cache entry leak in the IPv4 stack while also ensuring that
> if/when the next reboot occurs that patch will be in the kernel that
> boots.  Not that I'm saying it is impossible, only that I don't think
> it is cost effective.  

Have to say "me too" here. Regardless of what Ulf Wiger wrote,
redundant systems and failovers are probably cheaper in many
scenarios. My impression with specifications like this (that look
basically undoable or really terribly expensive (and easily breakable
too during further development)) is, that they come from the wishlist
of a big committee: And we all know the relationship between what the
committee writes down as indispensable and what is really needed in the
field.


> Of course if the customer is willing to pay for
> it, then sure why not.  However, the "if" in the last sentence is
> important.  The existance of AVL 27.0 only implies that someone
> wants it, not that they are willing to pay the asking price for it.

Exactly :-). 

Regards .. Markus
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <l9lsi.30279$F23.343250@phobos.telenet-ops.be>
Stephen J. Bevan wrote:
> Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
>> No one doubts that *after* parsing inputs one can have a statically
>> typed expression to evaluate. But you still need to do run-time checks
>> (i.e., the input parsing) and you must work to avoid forcing the users
>> into a narrow input channel - making them choose only a narrow subset
>> of possible inputs because that's all your parser and statically type
>> checked code were designed to handle. And what happens when you wan't
>> to add more valid input types to a running system?
> 
> I don't know what type of running system you mean so I'll use an
> example of a IP firewall.  

I have another example.

I've been working for telephone carrier companies. These guys have crazy 
pricing schemes. Also they need to process millions of calls every day. 
They need to test pricing policies on huge data sets and get results fast.

Sometimes a price is as simple as a price per second or per minute. 
sometimes it's a crazy thing requiring many columns with crazy formulas.

The price "object" (or type) for a destination is then different for 
each policy (and sometimes for each customer).

I can't have them call me each time they decide to run a simulation on 
some new pricing scheme.

Of course you could compose with objects or with functions, but really 
the best way to go about it would be to let them write their formulas 
(in some language they can understand, possibly with an interface to 
help them), and let the program compile these to machine code.

There you have a system with variable types and variable code which need 
to be plugged in at a user's whim.

Of course it is possible to do it statically, but you need to think 
about all they will ever want to do.

Sacha
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <kxzm19ocgp.fsf@hod.lan.m-e-leypold.de>
Sacha wrote:

> Stephen J. Bevan wrote:
>> Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
>>> No one doubts that *after* parsing inputs one can have a statically
>>> typed expression to evaluate. But you still need to do run-time checks
>>> (i.e., the input parsing) and you must work to avoid forcing the users
>>> into a narrow input channel - making them choose only a narrow subset
>>> of possible inputs because that's all your parser and statically type
>>> checked code were designed to handle. And what happens when you wan't
>>> to add more valid input types to a running system?
>> 
>> I don't know what type of running system you mean so I'll use an
>> example of a IP firewall.  
>
> I have another example.
>
> I've been working for telephone carrier companies. These guys have crazy 
> pricing schemes. Also they need to process millions of calls every day. 
> They need to test pricing policies on huge data sets and get results fast.
>
> Sometimes a price is as simple as a price per second or per minute. 
> sometimes it's a crazy thing requiring many columns with crazy formulas.
>
> The price "object" (or type) for a destination is then different for 
> each policy (and sometimes for each customer).
>
> I can't have them call me each time they decide to run a simulation on 
> some new pricing scheme.
>
> Of course you could compose with objects or with functions, but really 
> the best way to go about it would be to let them write their formulas 
> (in some language they can understand, possibly with an interface to 
> help them), and let the program compile these to machine code.
>
> There you have a system with variable types and variable code which need 
> to be plugged in at a user's whim.
>
> Of course it is possible to do it statically, but you need to think 
> about all they will ever want to do.

No, I don't think so: That's what (a) OO with polymorphism or (b)
simple function tables were invented for. Note that your 'pricing
scheme' is somehow handled by your code. That implies a
contract. Express that contract by giving a set of functions or
methods (and interface) which your code calls to make use of the
contract (i.e. the service it needs from the pricing scheme). This (a
interface / class type / set of functions) is what your code needs to
know. The rest is outside the domain of you code. Price schemes are
manufactured elsewhere and passed to your code for processing.

(I hope that was clear -- of course creating a new pricing scheme
requires a new class to be defined: Dependent on the language there
are various ways to insert it into a program, even
dynamically. Plugins is the keyword, most languages can load code
dynamically, even statically typed languages (note that the new parts
of the type, the extensions to the interface are in the new code and
not visible to the already running code)).

Regards -- Markus
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <4bEsi.31919$Nu.1201205@phobos.telenet-ops.be>
Markus E.L. 2 wrote:
> Sacha wrote:
> 
>> Stephen J. Bevan wrote:
>>> Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
>>>> No one doubts that *after* parsing inputs one can have a statically
>>>> typed expression to evaluate. But you still need to do run-time checks
>>>> (i.e., the input parsing) and you must work to avoid forcing the users
>>>> into a narrow input channel - making them choose only a narrow subset
>>>> of possible inputs because that's all your parser and statically type
>>>> checked code were designed to handle. And what happens when you wan't
>>>> to add more valid input types to a running system?
>>> I don't know what type of running system you mean so I'll use an
>>> example of a IP firewall.  
>> I have another example.
>>
>> I've been working for telephone carrier companies. These guys have crazy 
>> pricing schemes. Also they need to process millions of calls every day. 
>> They need to test pricing policies on huge data sets and get results fast.
>>
>> Sometimes a price is as simple as a price per second or per minute. 
>> sometimes it's a crazy thing requiring many columns with crazy formulas.
>>
>> The price "object" (or type) for a destination is then different for 
>> each policy (and sometimes for each customer).
>>
>> I can't have them call me each time they decide to run a simulation on 
>> some new pricing scheme.
>>
>> Of course you could compose with objects or with functions, but really 
>> the best way to go about it would be to let them write their formulas 
>> (in some language they can understand, possibly with an interface to 
>> help them), and let the program compile these to machine code.
>>
>> There you have a system with variable types and variable code which need 
>> to be plugged in at a user's whim.
>>
>> Of course it is possible to do it statically, but you need to think 
>> about all they will ever want to do.
> 
> No, I don't think so: That's what (a) OO with polymorphism or (b)
> simple function tables where invented for. Note thta your 'pricing
> scheme' is somehow handled by your code. That implies a
> contract. Express that contract by giving a set of functions or
> methods (and interface) which your code calls to make use of the
> contract (i.e. the service it needs from the pricing scheme). This (a
> interface / class type / set of functions) is what your code needs to
> know. The rest is outside the domain of you code. Price schemes are
> manufactured elsewhere and passed to your code for processing.
> 
> (I hope that was clear -- of course creating a new pricing scheme
> requires a new class to be defined: Dependent on the language there
> are various way to insert it into a programm, even
> dynamically. Plugins is the keyword, most languages can load code
> dynamically, even statically typed langauges (note that the new parts
> of the type, the extensions to the interface are in the new code and
> not visible to the already running code)).
> 
> Regards -- Markus
> 

I went that route for 10 years.
I had good success too. Still you have no idea on how ..err.. "creative" 
these guys are when it's about getting money from their customers =P
I very often had to add new ideas and rewrite part of the existing 
structures to accommodate these. You can of course tell that's because 
of a bad design, which wouldn't be very nice !

Sacha
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <czzm167hw0.fsf@hod.lan.m-e-leypold.de>
Sacha wrote:

> Markus E.L. 2 wrote:
>> Sacha wrote:
>> 
>>> Stephen J. Bevan wrote:
>>>> Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
>>>>> No one doubts that *after* parsing inputs one can have a statically
>>>>> typed expression to evaluate. But you still need to do run-time checks
>>>>> (i.e., the input parsing) and you must work to avoid forcing the users
>>>>> into a narrow input channel - making them choose only a narrow subset
>>>>> of possible inputs because that's all your parser and statically type
>>>>> checked code were designed to handle. And what happens when you wan't
>>>>> to add more valid input types to a running system?
>>>> I don't know what type of running system you mean so I'll use an
>>>> example of a IP firewall.  
>>> I have another example.
>>>
>>> I've been working for telephone carrier companies. These guys have crazy 
>>> pricing schemes. Also they need to process millions of calls every day. 
>>> They need to test pricing policies on huge data sets and get results fast.
>>>
>>> Sometimes a price is as simple as a price per second or per minute. 
>>> sometimes it's a crazy thing requiring many columns with crazy formulas.
>>>
>>> The price "object" (or type) for a destination is then different for 
>>> each policy (and sometimes for each customer).
>>>
>>> I can't have them call me each time they decide to run a simulation on 
>>> some new pricing scheme.
>>>
>>> Of course you could compose with objects or with functions, but really 
>>> the best way to go about it would be to let them write their formulas 
>>> (in some language they can understand, possibly with an interface to 
>>> help them), and let the program compile these to machine code.
>>>
>>> There you have a system with variable types and variable code which need 
>>> to be plugged in at a user's whim.
>>>
>>> Of course it is possible to do it statically, but you need to think 
>>> about all they will ever want to do.
>> 
>> No, I don't think so: That's what (a) OO with polymorphism or (b)
>> simple function tables where invented for. Note thta your 'pricing
>> scheme' is somehow handled by your code. That implies a
>> contract. Express that contract by giving a set of functions or
>> methods (and interface) which your code calls to make use of the
>> contract (i.e. the service it needs from the pricing scheme). This (a
>> interface / class type / set of functions) is what your code needs to
>> know. The rest is outside the domain of you code. Price schemes are
>> manufactured elsewhere and passed to your code for processing.
>> 
>> (I hope that was clear -- of course creating a new pricing scheme
>> requires a new class to be defined: Dependent on the language there
>> are various way to insert it into a programm, even
>> dynamically. Plugins is the keyword, most languages can load code
>> dynamically, even statically typed langauges (note that the new parts
>> of the type, the extensions to the interface are in the new code and
>> not visible to the already running code)).
>> 
>> Regards -- Markus
>> 
>
> I went that route for 10 years.
> I had good success too. Still you have no idea on how ..err.. "creative" 
> these guys are when it's about getting money from their customers =P
> I very often had to add new ideas and rewrite part of the existing 
> structures to accommodate these. You can of course tell that's because 
> of a bad design, which wouldn't be very nice !


You miss the point here: If your code (that handles the pricing
scheme) has no contract to rely on, it cannot fulfill its purpose
(or will have to be changed all the time). If it has, the contract can
be abstracted as a number of call backs.

Regards .. Markus
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <w6uti.36324$%03.1252460@phobos.telenet-ops.be>
Markus E.L. 2 wrote:
> Sacha wrote:
> 
>> Markus E.L. 2 wrote:
>>> Sacha wrote:
>>>
>>>> Stephen J. Bevan wrote:
>>>>> Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
>>>>>> No one doubts that *after* parsing inputs one can have a statically
>>>>>> typed expression to evaluate. But you still need to do run-time checks
>>>>>> (i.e., the input parsing) and you must work to avoid forcing the users
>>>>>> into a narrow input channel - making them choose only a narrow subset
>>>>>> of possible inputs because that's all your parser and statically type
>>>>>> checked code were designed to handle. And what happens when you wan't
>>>>>> to add more valid input types to a running system?
>>>>> I don't know what type of running system you mean so I'll use an
>>>>> example of a IP firewall.  
>>>> I have another example.
>>>>
>>>> I've been working for telephone carrier companies. These guys have crazy 
>>>> pricing schemes. Also they need to process millions of calls every day. 
>>>> They need to test pricing policies on huge data sets and get results fast.
>>>>
>>>> Sometimes a price is as simple as a price per second or per minute. 
>>>> sometimes it's a crazy thing requiring many columns with crazy formulas.
>>>>
>>>> The price "object" (or type) for a destination is then different for 
>>>> each policy (and sometimes for each customer).
>>>>
>>>> I can't have them call me each time they decide to run a simulation on 
>>>> some new pricing scheme.
>>>>
>>>> Of course you could compose with objects or with functions, but really 
>>>> the best way to go about it would be to let them write their formulas 
>>>> (in some language they can understand, possibly with an interface to 
>>>> help them), and let the program compile these to machine code.
>>>>
>>>> There you have a system with variable types and variable code which need 
>>>> to be plugged in at a user's whim.
>>>>
>>>> Of course it is possible to do it statically, but you need to think 
>>>> about all they will ever want to do.
>>> No, I don't think so: That's what (a) OO with polymorphism or (b)
>>> simple function tables where invented for. Note thta your 'pricing
>>> scheme' is somehow handled by your code. That implies a
>>> contract. Express that contract by giving a set of functions or
>>> methods (and interface) which your code calls to make use of the
>>> contract (i.e. the service it needs from the pricing scheme). This (a
>>> interface / class type / set of functions) is what your code needs to
>>> know. The rest is outside the domain of you code. Price schemes are
>>> manufactured elsewhere and passed to your code for processing.
>>>
>>> (I hope that was clear -- of course creating a new pricing scheme
>>> requires a new class to be defined: Dependent on the language there
>>> are various way to insert it into a programm, even
>>> dynamically. Plugins is the keyword, most languages can load code
>>> dynamically, even statically typed langauges (note that the new parts
>>> of the type, the extensions to the interface are in the new code and
>>> not visible to the already running code)).
>>>
>>> Regards -- Markus
>>>
>> I went that route for 10 years.
>> I had good success too. Still you have no idea on how ..err.. "creative" 
>> these guys are when it's about getting money from their customers =P
>> I very often had to add new ideas and rewrite part of the existing 
>> structures to accommodate these. You can of course tell that's because 
>> of a bad design, which wouldn't be very nice !
> 
> 
> You miss the point here: If youre code (that handles the pricing
> scheme) has no contract to rely on, it cannot fullfill it's purpose
> (or will have to be changed all the time). If it has, the contract can
> be abstracted as a number of call backs.
> 
> Regards .. Markus
> 

I have to agree with you. There are many ways to tackle such problems 
though, I eventually settled for the compiled, smallish, easy, quick and 
dirty but doing the job language, with a "no-brainer" helper gui for 
those cases I knew about.

Sacha
From: Nicolas Neuss
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <87abtedsuy.fsf@ma-patru.mathematik.uni-karlsruhe.de>
Paul Rubin <·············@NOSPAM.invalid> writes:

> I've written a lot more Lisp code than I've written Haskell or ML, so
> maybe what I'm hoping for from static types is wishful thinking.  But
> I've made way more runtime type errors in Lisp than I'm happy about,
> that could have been caught at compile time with a static language.

May I ask which implementation you used back then?  CMUCL/SBCL (and also
Allegro) do already quite some type inference (and spit out corresponding
warnings), so that I believe that I do not miss very many type errors, at
least compared with my previous programming in C.

Yours,
Nicolas
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7xd4yadmb1.fsf@ruckus.brouhaha.com>
Nicolas Neuss <········@mathematik.uni-karlsruhe.de> writes:
> May I ask which implementation you used back then?  CMUCL/SBCL (and also
> Allegro) do already quite some type inference (and spit out corresponding
> warnings), so that I believe that I do not miss very many type errors, at
> least compared with my previous programming in C.

Ehh, not such fancy implementations, I'm afraid.  KCL, Emacs Lisp,
another small Lisp that I wrote myself, and a few small Scheme
systems.  More recently I selected Hedgehog Lisp for an embedded
project that ended up getting cancelled but Hedgehog clearly faced the
same issue.  The developers have put static annotation and checking on
their roadmap.  And although it's not Lisp, I use Python a lot and
deal with runtime type errors all the time.  Python's roadmap as well
has some notions of future static typechecking features.
From: Nicolas Neuss
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <871wepesch.fsf@ma-patru.mathematik.uni-karlsruhe.de>
Paul Rubin <·············@NOSPAM.invalid> writes:

> Nicolas Neuss <········@mathematik.uni-karlsruhe.de> writes:
> > May I ask which implementation you used back then?  CMUCL/SBCL (and also
> > Allegro) do already quite some type inference (and spit out corresponding
> > warnings), so that I believe that I do not miss very many type errors, at
> > least compared with my previous programming in C.
> 
> Ehh, not such fancy implementations, I'm afraid.  KCL, Emacs Lisp,
> another small Lisp that I wrote myself, and a few small Scheme
> systems.

Then you should definitely give SBCL a try (or Allegro, if you don't want
to invest much time installing an IDE).  Probably, those implementations do
already 90% of the typechecking you missed before.

Nicolas
From: Matthias Blume
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <m2bqducvdq.fsf@my.address.elsewhere>
Raffael Cavallaro
<················@pas-d'espam-s'il-vous-plait-mac.com> writes:

> partially known range of inputs -> combination of human interactive
> and algorithmic processing -> unpredictable range of outputs
>
>
> If this is the future of computing, then the focus on static typing is
> a massive effort in solving the wrong problem.

I, for one, think that research in static typing is exactly the
/right/ effort.  In my experience (and I do have experience with both
paradigms), I find it easier and faster to perform "exploratory"
programming with a static type system to help me along.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7xbqdu8mju.fsf@ruckus.brouhaha.com>
Matthias Blume <····@my.address.elsewhere> writes:
> I, for one, think that research in static typing is exactly the
> /right/ effort.  In my experience (and I do have experience with both
> paradigms), I find it easier and faster to perform "exploratory"
> programming with a static type system to help me along.

See for example this paper about using Haskell's type system to figure
out how MapReduce can work: http://www.cs.vu.nl/~ralf/MapReduce/
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <2007073001440043042-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-30 01:29:05 -0400, Matthias Blume <····@my.address.elsewhere> said:

> In my experience (and I do have experience with both
> paradigms), I find it easier and faster to perform "exploratory"
> programming with a static type system to help me along.

But the static type system cannot help you with the fact that in an 
interactive setting you cannot know the nature of the inputs (as humans 
can be so clever), you cannot fully know the nature of the interactive 
processing (ditto), and therefore cannot hope to know the nature of the 
outputs (excluding the obvious reductio ad absurdum union type of 
"bag-of-bits-of-unknown-type/types"). You'll have to check all of this 
at runtime anyway.

You find static typing a support. I find it a hindrance. This is a 
matter of personal taste (and I've said elsewhere in this thread that 
the relative popularities of c.l.l. and c.l.f. say something about where 
most programmers' tastes in this matter lie). My main point is not 
about taste, but about the likelihood that, for interactive computing, 
where the future increasingly lies, the research into static typing has 
been a major research effort in the wrong direction.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7x7ioi8mh8.fsf@ruckus.brouhaha.com>
Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> But the static type system cannot help you with the fact that in an
> interactive setting you cannot know the nature of the inputs (as
> humans can be so clever), you cannot fully know the nature of the
> interactive processing (ditto),

The nature of the inputs and outputs is determined by the hardware:
keyboard inputs, mouse clicks, network connections, etc.  These can
all be described by static types.
From: Matthias Blume
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <m23az6csxc.fsf@my.address.elsewhere>
Raffael Cavallaro
<················@pas-d'espam-s'il-vous-plait-mac.com> writes:

> On 2007-07-30 01:29:05 -0400, Matthias Blume <····@my.address.elsewhere> said:
>
>> In my experience (and I do have experience with both
>> paradigms), I find it easier and faster to perform "exploratory"
>> programming with a static type system to help me along.
>
> But the static type system cannot help you with the fact that in an
> interactive setting you cannot know the nature of the inputs (as
> humans can be so clever),

User input is handled by parsers, not by type systems.  Parsers work
the same way in statically and dynamically typed settings.  If your
code is prepared for certain inputs, then this can be expressed in
types.  A type is just the (static) expression of a program
invariant.

> you cannot fully know the nature of the interactive processing (ditto),

If this were true, you couldn't write any program to deal with
interactive processing.  After all, your code (be in statically typed
or dynamically typed) must be prepared to handle all interactions.
Again, static types merely describe (and let a compiler enforce) the
invariants that are inherent to such code.

> You find static typing a support. I find it a hinderance.

How much statically typed code (using a modern type system) have you
written?

> This is a matter of personal taste (and I've said elswhere in this
> thread that the relative popularities of c.l.l. and c.l.f. say
> somthing about where most programmers' tastes in this matter lie).

This is one point we can agree on.

> My main point is not about taste, but about the liklihood that, for
> interactive computing, where the future increasingly lies, the
> research into static typing has been a major research effort in the
> wrong direction.

I understand your point, but I do not agree with it.  In fact, I think
that likelihood to be /extremely/ low.
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7rd4ya1aga.fsf@hod.lan.m-e-leypold.de>
> On 2007-07-30 01:29:05 -0400, Matthias Blume <····@my.address.elsewhere> said:
>
>> In my experience (and I do have experience with both
>> paradigms), I find it easier and faster to perform "exploratory"
>> programming with a static type system to help me along.
>
> But the static type system cannot help you with the fact that in an
> interactive setting you cannot know the nature of the inputs (as
> humans can be so clever), you cannot fully know the nature of the
> interactive processing (ditto), and therefore cannot hope to know the
> nature of the outputs (excluding the obvious reductio ad absurdum
> union type of "bag-of-bits-of-unknow-type/types"). You'll have to
> check all of this at runtime anyway.

And after the checking you'll have a statically typed expression.

- M
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8l8ej$dv$1@registered.motzarella.org>
Matthias Blume schrieb:
> Raffael Cavallaro
> <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> 
>> partially known range of inputs -> combination of human interactive
>> and algorithmic processing -> unpredictable range of outputs
>>
>>
>> If this is the future of computing, then the focus on static typing is
>> a massive effort in solving the wrong problem.
> 
> I, for one, think that research in static typing is exactly the
> /right/ effort.  In my experience (and I do have experience with both
> paradigms), I find it easier and faster to perform "exploratory"
> programming with a static type system to help me along.

What about the Erlang approach?
It is dynamically typed, but with Dialyzer [1] one can analyze the
code. For Lisp Qi can do these kinds of things in some sense, but one
could develop a Lisp-Dialyzer as well. It should be able to talk about
more or less all type errors. In cases where the program is not sure if
the programmer intended changing the type during runtime it could simply
ask her, or spit out warnings.
If the compilers get such a mode they could also get out more speed
from dynamic code.

[1] http://www.it.uu.se/research/group/hipe/dialyzer


André
-- 
From: Matthias Blume
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m2ps29b5ry.fsf@my.address.elsewhere>
André Thieme <······························@justmail.de> writes:

> Matthias Blume schrieb:
>> Raffael Cavallaro
>> <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
>>
>>> partially known range of inputs -> combination of human interactive
>>> and algorithmic processing -> unpredictable range of outputs
>>>
>>>
>>> If this is the future of computing, then the focus on static typing is
>>> a massive effort in solving the wrong problem.
>>
>> I, for one, think that research in static typing is exactly the
>> /right/ effort.  In my experience (and I do have experience with both
>> paradigms), I find it easier and faster to perform "exploratory"
>> programming with a static type system to help me along.
>
> What about the Erlang approach?
> It is dynamically typed, but with Dialyzer [1] one can analyze the
> code. For Lisp Qi can do these kinds of things in some sense, but one
> could develop a Lisp-Dialyzer as well. It should be able to talk about
> more or less all type errors. In cases where the program is not sure if
> the programmer intended changing the type during runtime it could simply
> ask her, or spit out warnings.
> If the compilers get such a mode they could also get out more speed
> from dynamic code.

I'm sure all these things are fine tools.  However, they don't address
what I am getting at.  When I write code, I spend most of my time
thinking about data-structural invariants.  What a type system such as
ML's lets me do is write these invariants down in such a way that the
compiler can then verify that my code actually adheres to them.  Thus,
the type system provides me with (a) the necessary notation for
writing down invariants, and (b) with the guarantee that violations of
these invariants are discovered early in the development cycle.

Some here (especially Mr. Joswig) have harped a lot on the
(indisputable) fact that in any given type system there necessarily
exist invariants that cannot be expressed.  Moreover, there are
undeniably a lot of interesting invariants which would be worth
verifying statically, but which cannot be expressed in most existing
type systems.

But just because there are things we cannot do there is no reason to
give up on the many that we can do quite well.  I won't decide against
taking the train from Berlin to Hamburg just because the tracks don't
also extend to New York City or to the moon.  Primality, exhaustive
rule sets, etc. are mere straw men -- put up to be knocked down.  Many
much simpler invariants exist in pretty much all programs, and modern
type systems are good at expressing and enforcing them.  Type
inference is only part of the picture: the compiler figures out some
of the more obvious invariants based on the way the code is written,
and then makes sure that other parts of the program are consistent.
But the real power comes with the ability to define your own type
abstractions.  The ML module system and similar facilities in other
HOT languages are excellent tools to this end.

Matthias
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2007073101553016807-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-30 23:39:45 -0400, Matthias Blume <····@my.address.elsewhere> said:

> But just because there are things we cannot do there is no reason to
> give up on the many that we can do quite well.  I won't decide against
> taking the train from Berlin to Hamburg just because the tracks don't
> also extend to New York City or to the moon.

But you might not take the train from Berlin to Hamburg if you thought 
you might do a side trip to Magdeburg - you might choose a more 
flexible means of transport such as a car, even if it might be somewhat 
slower.

IOW, it isn't merely that static typing doesn't do everything yet, it's 
that it also that it *forces* you to conceive of the problem 
principally in terms, as you put it, of data structure invariants - 
i.e., in terms of *the specific representation* of problem domain 
entities - whether you want to or not. The lisp way is to conceive of 
the problem in terms of the meaning of problem domain entities 
directly, and worry about the *specific* representation later. This 
approach suits many people's thinking process better than focusing on 
data structure invariants. I have no doubt that doing so works well for
you, but others find it easier and more productive to deal with problem 
domain entities as they are used in the problem domain, and not worry 
about their specific representation until later.
From: Matthias Blume
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m2hcnlayc8.fsf@my.address.elsewhere>
Raffael Cavallaro
<················@pas-d'espam-s'il-vous-plait-mac.com> writes:

> On 2007-07-30 23:39:45 -0400, Matthias Blume <····@my.address.elsewhere> said:
>
>> But just because there are things we cannot do there is no reason to
>> give up on the many that we can do quite well.  I won't decide against
>> taking the train from Berlin to Hamburg just because the tracks don't
>> also extend to New York City or to the moon.
>
> But you might not take the train from Berlin to Hamburg if you thought
> you might do a side trip to Magdeburg - you might choose a more
> flexible means of transport such as a car, even if it might be
> somewhat slower.

This is the problem with metaphors.  Someone is going to pick it up
and twist it beyond recognition.  I guess I shouldn't have brought
it up, but for the record:  All I wanted to say is that one should not
give up on a tool that is perfectly fine for some things just because
it cannot do certain other things (for which there is no tool at
all).  If I want to do some other thing for which there is a tool,
then, obviously, I'm going to use that other tool...

> IOW, it isn't merely that static typing doesn't do everything yet,
> it's that it also that it *forces* you to conceive of the problem
> principally in terms, as you put it, of data structure invariants - 
> i.e., in terms of *the specific representation* of problem domain
> entities - whether you want to or not.

If you had actually used the kind of type system we are talking about
for any significant project, you would know that this is simply not
true.  Precisely to be able to avoid thinking in concrete terms at a
time when you don't yet want to think in such terms, there are
powerful ways of defining abstract types.  Andreas Rossberg has
already tried to explain this here.

> The lisp way is to conceive of the problem in terms of the meaning
> of problem domain entities directly, and worry about the *specific*
> representation later.

I do the same thing, and guess what: the type system helps me with it!
In fact, what you describe is /precisely/ the way of approaching a
problem using abstract types.  The strength of a static type system is
that it lets you do this sort of thing in "layers": I can define my
high-level abstractions and program against those, and the compiler
will point out when I am not making sense.  I can then implement some
of my high-level abstractions by defining and using lower-level
abstractions.  Again, the type system will help me track down when I
screw up here.  And the cool thing is -- given that my high-level
abstractions from above are unchanged, I am /guaranteed/ that whatever
implementation decisions I make for them, I will not screw up the
high-level logic of the program.  This process can be repeated, one
level at a time, until implementations for all abstractions have been
filled in.

> This approach suits many people's thinking process better than
> focusing on data structure invariants.

I strongly believe that all programmers inherently think about data
structural invariants.  However, some of them are clearly not as
keenly aware of this as others.  To be sure: By data structural
invariants I don't mean concrete bit layouts in memory or any such
thing.  I am talking about /abstract/ types, i.e., types with sets of
operations and axioms that have to hold for those operations.

> I have no doubt that doing so works well for you, but others find it
> easier and more productive to deal with problem domain entities as
> they are used in the problem domain, and not worry about their
> specific representation until later.

As I have hopefully explained by now, the two choices are actually not
at conflict.  Thinking about data structure invariants (at various
levels of abstraction) certainly does work well for me, and that is
precisely /because/ it lets me "deal with problem domain entities as
they are used in the problem domain, and not worry about their
specific representation until later."

Cheers,
Matthias
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2007073102403827544-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-31 02:20:23 -0400, Matthias Blume <····@my.address.elsewhere> said:

> I strongly believe that all programmers inherently think about data
> structural invariants.  However, some of them are clearly not as
> keenly aware of this as others.  To be sure: By data structural
> invariants I don't mean concrete bit layouts in memory or any such
> thing.  I am talking about /abstract/ types, i.e., types with sets of
> operations and axioms that have to hold for those operations.

I disagree with this strongly. There are certainly classes of problems 
for which this is true, but these are not what dynamic languages are about.

There exist domains where the entities and their relationships do not 
yet have a clear one-to-one mapping with abstract data types and 
operations on them. One rather discovers one possible representation of 
the domain entities and their relationships as one builds the program. 
More importantly, it is very helpful to be able to have this model be 
internally *inconsistent* during development as it takes shape. It 
prevents the distraction from discovering/building the model which is 
caused by having to keep it perfectly internally consistent at all 
times just to satisfy a type-checking compiler. There are definite 
benefits of being able to run and test and expand what to a 
type-checking compiler would be considered an incorrect program, 
something that such a type-checking compiler would not let you run at 
all. iirc, joe marshall had some interesting posts on this the last go 
round a couple of years ago.
From: Matthias Blume
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m2d4y8bnn1.fsf@my.address.elsewhere>
Raffael Cavallaro
<················@pas-d'espam-s'il-vous-plait-mac.com> writes:

> On 2007-07-31 02:20:23 -0400, Matthias Blume <····@my.address.elsewhere> said:
>
>> I strongly believe that all programmers inherently think about data
>> structural invariants.  However, some of them are clearly not as
>> keenly aware of this as others.  To be sure: By data structural
>> invariants I don't mean concrete bit layouts in memory or any such
>> thing.  I am talking about /abstract/ types, i.e., types with sets of
>> operations and axioms that have to hold for those operations.
>
> I disagree with this strongly. There are certainly classes of problems
> for which this is true, but these are not what dynamic languages are
> about.
>
> There exist domains where the entities and their relationships do not
> yet have a clear one-to-one mapping with abstract data types and
> operations on them. One rather discovers one possible representation
> of the domain entities and their relationships as one builds the
> program. More importantly, it is very helpful to be able to have this
> model be internally *inconsistent* during development as it takes
> shape. It prevents the distraction from discovering/building the model
> which is caused by having to keep it perfectly internally consistent
> at all times just to satisfy a type-checking compiler. There are
> definite benefits of being able to run and test and expand what to a
> type-checking compiler would be considered an incorrect program,
> something that such a type-checking compiler would not let you run at
> all. iirc, joe marshall had some interesting posts on this the last go
> round a couple of years ago.

This may sound good to some, but I don't think any of this is true in
reality.  For example, how to run programs that are only partially
complete even in a statically typed setting has been discussed many
times.  There does not have to be a one-to-one mapping with abstract
types.  Whatever mapping you choose is not fixed over
(development-)time, it can evolve along with the rest of the code.
And given that the types are abstract, they afford a lot of
flexibility, since being abstract they do /not/ force you to choose a
particular representation early on.

As you might remember, none of Joe Marshall's "examples" have ever
convinced me.  I don't think it is particularly helpful to be able to
run type-incorrect programs.  If parts of the program are incomplete,
there are always means of isolating them from the parts you want to
test.  I have tried to explain some of the techniques and their
overall advantages before, so I won't repeat them here.

Anyway, I think we are talking past each other.  This discussion, like
so many before, will not convince either side.  Thus, since you
strongly disagree with me, and since I strongly disagree with you, I'd
say we just agree to disagree and leave it at that.

Kind regards,
Matthias
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2007080109352337709-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-31 11:26:10 -0400, Matthias Blume <····@my.address.elsewhere> said:

> As you might remember, none of Joe Marshall's "examples" have ever
> convinced me.  I don't think it is particularly helpful to be able to
> run type-incorrect programs.  If parts of the program are incomplete,
> there are always means of isolating them from the parts you want to
> test.  I have tried to explain some of the techniques and their
> overall advantages before, so I won't repeat them here.

Yes, so we clearly differ here and I don't think that any amount of 
discussion is likely to convince either of us. People who prefer 
dynamic typing want an absolute minimum of distraction from the compiler 
during exploratory programming. Type checking compilers have come a 
long way, but they can't be said to provide a minimum of distraction. 
You see no benefits of running type-incorrect programs for testing 
purposes of program parts and we do. Simply a matter of development 
style and taste.

If a language like haskell or an ml had a mode that allowed 
type-incorrect and incomplete programs to be run for testing (without 
the distraction of having to write stubs first) it would be much more 
attractive to me and, I suspect others.

Then there's the issue of minimal syntax which allows a macro system 
that uses the same language for macros as for functions ...
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185978706.984427.288890@w3g2000hsg.googlegroups.com>
On 1 Aug., 15:35, Raffael Cavallaro <················@pas-d'espam-s'il-
vous-plait-mac.com> wrote:

> If a language like haskell or an ml had a mode that allowed
> type-incorrect and incomplete programs to be run for testing (without
> the distraction of having to write stubs first) it would be much more
> attractive to me and, I suspect others.

What then would you expect from "running" something like
  (\(a,b) -> b) []

One can understand type inference as a feature that sorts out programs
that can't possibly run without errors and leaves those that have
errors most probably.
Can't you see that the above example is not worth running at all and
is totally different from
   head xs
where we can understand, that the compiler has no general way of
knowing beforehand, whether xs will be the empty list or not?
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8qaan$oee$1@online.de>
Ingo Menger schrieb:
> Can't you see that the above example is not worth running at all and
> is totally different from
>    head xs
> where we can understand, that the compiler has no general way of
> knowing beforehand, whether xs will be the empty list or not?

It can be worth running, if it's just the part of the system that isn't 
implemented yet.

Regards,
Jo
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186044278.734784.294440@q75g2000hsh.googlegroups.com>
On 1 Aug., 17:54, Joachim Durchholz <····@durchholz.org> wrote:
> Ingo Menger schrieb:
>
> > Can't you see that the above example is not worth running at all and
> > is totally different from
> >    head xs
> > where we can understand, that the compiler has no general way of
> > knowing beforehand, whether xs will be the empty list or not?
>
> It can be worth running, if it's just the part of the system that isn't
> implemented yet.

Thats why I prefer "bottom up". In this case, there aren't any
unimplemented parts.
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hdhv1F3in0b4U1@mid.individual.net>
Ingo Menger wrote:
> On 1 Aug., 15:35, Raffael Cavallaro <················@pas-d'espam-s'il-
> vous-plait-mac.com> wrote:
> 
>> If a language like haskell or an ml had a mode that allowed
>> type-incorrect and incomplete programs to be run for testing (without
>> the distraction of having to write stubs first) it would be much more
>> attractive to me and, I suspect others.
> 
> What then would you expect from "running" something like
>   (\(a,b) -> b) []
> 
> One can understand type inference as a feature that sorts out programs
> that can't possibly run without errors and leaves those that have
> errors most probably.

No, a static type system that enforces type soundness always has to 
reject certain programs that may succeed at runtime. The trade off is 
that in static languages, preference is given to type correctness, and 
the set of semantically correct but type-incorrect programs are 
considered negligible, whereas in dynamic languages, preference is given 
to flexibility, especially with regard to testing and reflective 
capabilities in a language.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186044168.382119.304340@22g2000hsm.googlegroups.com>
On 2 Aug., 10:12, Pascal Costanza <····@p-cos.net> wrote:
> Ingo Menger wrote:

> > One can understand type inference as a feature that sorts out programs
> > that can't possibly run without errors and leaves those that have
> > errors most probably.
>
> No, a static type system that enforces type soundness always has to
> reject certain programs that may succeed at runtime.

This is a question of semantics.
One can define a semantic, where nonsensical actions like passing the
empty list to a function that extracts the second element from a
tuple, would  result in some more or less meaningful value.
An example of a language with such a forgiving semantic is perl.
Certain nonsensical constructs produce the value undef and the program
can go on with the undef value.

> The trade off is
> that in static languages, preference is given to type correctness, and
> the set of semantically correct but type-incorrect programs are
> considered negligible,

No, they are simply non-existent by definition. You can't apply perl
semantics to haskell.

> whereas in dynamic languages, preference is given
> to flexibility, especially with regard to testing and reflective
> capabilities in a language.

The point is that one day one has to pay for certain kinds of
flexibility ...
The need for testing is, of course, the higher the more dynamic the
language.
From: Olivier Drolet
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186061080.606412.203960@22g2000hsm.googlegroups.com>
On Aug 2, 4:42 am, Ingo Menger <···········@consultant.com> wrote:

> The point is that one day one has to pay for certain kinds of
> flexibility ...
> The need for testing is, of course, the higher the more dynamic the
> language.

Unit testing should trap all the type-checking errors, as well as all
the other errors a type-checking system can't verify, right? And best
practices suggest you should perform unit testing for any software
worthy of consideration, right? (Well, maybe not all software, but
most non-trivial code...) So, if you're going to write a test suite
anyway, the penalty for using a dynamic language may be quite small,
don't you think?
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186068149.203181.245330@q3g2000prf.googlegroups.com>
On 2 Aug., 15:24, Olivier Drolet <·······@mac.com> wrote:
> On Aug 2, 4:42 am, Ingo Menger <···········@consultant.com> wrote:
>
> > The point is that one day one has to pay for certain kinds of
> > flexibility ...
> > The need for testing is, of course, the higher the more dynamic the
> > language.
>
> Unit testing should trap all the type-checking errors, as well as all
> the other errors a type-checking system can't verify, right?

Trapping the error and finding out where it originates and why and
under which circumstances is not the same. As I pointed out in another
posting, this may make the temptation to quick-dirty fix it
irresistible.

> And best
> practices suggest you should perform unit testing for any software
> worthy of consideration, right? (Well, maybe not all software, but
> most non-trivial code...) So, if you're going to write a test suite
> anyway, the penalty for using a dynamic language may be quite small,
> don't you think?

No, for the reasons above.
Furthermore, when you agree that type errors are to be avoided, why
then do you insist on being able to "run" faulty code in the first
place? Suppose we had a language with a compiler that could proove
that the code implements a specification, would you insist on being
able to run code that does not? For what reason?

The only justification for languages without strong static typing
would be the following: The type systems of existing SST-languages do
not allow certain classes of programs to be written. Therefore, in
those cases, one has to resort to other languages.
I am not sure whether this claim would be defendable now or in the
future, it surely was true when the type system of PASCAL was in
fashion.
From: Olivier Drolet
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186080208.876259.282800@g12g2000prg.googlegroups.com>
On Aug 2, 11:22 am, Ingo Menger <···········@consultant.com> wrote:
> On 2 Aug., 15:24, Olivier Drolet <·······@mac.com> wrote:
>
(snip)
>
> Trapping the error and finding out where it originates and why and
> under which circumstances is not the same.

Granted.

(snip)

> Furthermore, when you agree that type errors are to be avoided, why
> then do you insist on being able to "run" faulty code in the first
> place?

Because some type errors might not be errors? And because it is more
convenient to fix them later? I.e., fixing them now has a cost?

>Suppose we had a language with a compiler that could proove
> that the code implements a specification, would you insist on being
> able to run code that does not? For what reason?

Good question. I can see how a specification-conforming compiler might
be useful. Off hand, no reasonable person would want to violate a
spec. However, my willingness to work with such a tool might depend on
how complex and time consuming it is for me to nail down a complete,
disambiguated specification, or merely change said specification
should new requirements demand it. If the tool gets in the way...

> The only justification for languages without strong static typing
> would be the following: The type systems of existing SST-languages do
> not allow certain classes of programs to be written.

Again, how about convenience, as a justification?

>Therefore, in
> those cases, one has to resort to other languages.
> I am not sure whether this claim would be defendable now or in the
> future, it surely was true when the type system of PASCAL was in
> fashion.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b22c09$0$1591$ed2619ec@ptn-nntp-reader02.plus.net>
Olivier Drolet wrote:
> Good question. I can see how a specification-conforming compiler might
> be useful. Off hand, no reasonable person would want to violate a
> spec. However, my willingness to work with such a tool might depend on
> how complex and time consuming it is for me to nail down a complete,
> disambiguated specification, or merely change said specification
> should new requirements demand it. If the tool gets in the way...

This sword cuts both ways. Static type systems typically prevent you from
running code with type errors as you can't compile them. However, extending
a sum type in an ML variant causes the compiler to warn you of all places
in your code where pattern matches fail to account for the new type
constructor. So the static checker actually helps you develop. Also,
missing match cases are typically warnings rather than errors, so you can
still run the code (run-time errors are automatically inserted).

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186089334.090711.12540@x35g2000prf.googlegroups.com>
> Good question. I can see how a specification-conforming compiler might
> be useful. Off hand, no reasonable person would want to violate a
> spec.

Because nobody has ever gotten a spec that was incomplete/misleading/
wrong, nobody ever needed to improvise during the development of a
program, and nobody was ever in a situation of "fix this now and leave
that for later, or else there is no later"...
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <czlkctpt7d.fsf@hod.lan.m-e-leypold.de>
Olivier Drolet wrote:

> On Aug 2, 11:22 am, Ingo Menger <···········@consultant.com> wrote:
>> On 2 Aug., 15:24, Olivier Drolet <·······@mac.com> wrote:
>>
> (snip)
>>
>> Trapping the error and finding out where it originates and why and
>> under which circumstances is not the same.
>
> Granted.
>
> (snip)
>
>> Furthermore, when you agree that type errors are to be avoided, why
>> then do you insist on being able to "run" faulty code in the first
>> place?
>
> Because some type errors might not be errors? And because it is more
> convenient to fix them later? I.e., fixing them now has a cost?

Fixing errors later always is more expensive. So much more expensive
that you cannot hope that the effect that you'd perhaps have to fix
fewer errors (because they got written over by new code) would
compensate for that.

Regards -- Markus
From: Olivier Drolet
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186098983.572202.262310@i13g2000prf.googlegroups.com>
On Aug 2, 4:39 pm, ·····································@ANDTHATm-e-
leypold.de (Markus E.L. 2) wrote:
> Olivier Drolet wrote:
> > On Aug 2, 11:22 am, Ingo Menger <···········@consultant.com> wrote:
> >> On 2 Aug., 15:24, Olivier Drolet <·······@mac.com> wrote:
>
> > (snip)
>
> >> Trapping the error and finding out where it originates and why and
> >> under which circumstances is not the same.
>
> > Granted.
>
> > (snip)
>
> >> Furthermore, when you agree that type errors are to be avoided, why
> >> then do you insist on being able to "run" faulty code in the first
> >> place?
>
> > Because some type errors might not be errors? And because it is more
> > convenient to fix them later? I.e., fixing them now has a cost?
>
> Fixing errors later always is more expensive. So much more expensive
> that you cannot hope that the effect that you'd perhaps have to fix
> fewer errors (because they got written over by new code) would
> compensate for that.
>
> Regards -- Markus

You know, it's funny that you should make the claim that fixing (type)
errors later is so terribly penalizing. For many decades, proponents
of dynamic languages (old and new languages, some of them strongly
typed, and with inferential type checkers) have been claiming that
using these languages works better for them, makes them more
productive. I'm not appealing to popularity, just pointing out that
Lisp programmers seem to get by just fine. You seem to believe they're
wrong, that waiting in the hope of over/rewriting their code in the
near future is dangerous; that getting proof of concept first, and
correctness later, is on average a mistake; that getting the ideas
written down as code, unencumbered with type considerations, is going
to bite them hard.

I see this as a compromise, a choice that users of dynamic languages
make, where other strategies are employed to compensate for their
programming style (testing). Joachim, Jon and yourself do not share
this point of view.

Fine by me.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <rqsl71ii35.fsf@hod.lan.m-e-leypold.de>
Olivier Drolet wrote:

> On Aug 2, 4:39 pm, ·····································@ANDTHATm-e-
> leypold.de (Markus E.L. 2) wrote:
>> Olivier Drolet wrote:
>> > On Aug 2, 11:22 am, Ingo Menger <···········@consultant.com> wrote:
>> >> On 2 Aug., 15:24, Olivier Drolet <·······@mac.com> wrote:
>>
>> > (snip)
>>
>> >> Trapping the error and finding out where it originates and why and
>> >> under which circumstances is not the same.
>>
>> > Granted.
>>
>> > (snip)
>>
>> >> Furthermore, when you agree that type errors are to be avoided, why
>> >> then do you insist on being able to "run" faulty code in the first
>> >> place?
>>
>> > Because some type errors might no be errors? And because it is more
>> > convenient to fix them later? I.e., fixing them now has a cost?
>>
>> Fixing errors later always is more expensive. So much more expensive
>> that you cannot hope that the effect that you'd perhaps have to fix
>> fewer errors (because they got written over by new code) would
>> compensate for that.
>>
>> Regards -- Markus
>
> You know, it's funny that you should make the claim that fixing (type)
> errors later is so terribly penalizing. For many decades, proponents

Fixing any errors later is penalizing. There are even studies about
that AFAIR. Fixing them 10 minutes later of course, doesn't
cost. Fixing them tomorrow already does.

> of dynamic languages (old and new languages, some of them strongly
> typed, and with inferential type checkers) have been claiming that
> using theses languages works better for them, makes them more
> productive. I'm not appealing to popularity, just pointing out that
> Lisp programmers seem to get by just fine. You seem to believe they're
> wrong, that waiting in the hope of over/rewriting their code in the

No. I believe that they're actually doing manual type inference in
their heads ("this we use with x and x is a string so this must be a
string too") and fix type errors at once or mostly at once, not
later. Observation tells me that this is the type of reasoning that is
going on.

> near future is dangerous; that getting proof of concept first, and
> correctness later, is on average a mistake; 

You imply that a proof of concept cannot be typed well. I dispute
that. It doesn't conform to my experience and I suppose not to the
experience of thousands of SML, Ocaml and Haskell programmers out
there.

Furthermore I claim that type systems actually help in experimental
programming since, when data structures are changed or grown during
experimental development, they generate warnings and/or errors at the
places those changed properties are used.

> that getting the ideas written down as code, unencumbered with type
> considerations, is going to bite them hard.

I further claim that I never experienced type considerations as an
encumbrance, certainly not in languages with type inference and the
possibility to do type annotations sparingly.


> I see this as a compromise, a choice that users of dynamic languages
> make, where other strategies are employed to compensate for their

Yes, they make that choice. Perhaps they are different. What I do not
understand is their quasi traumatic horror of type systems. 

> programming style (testing). Joachim, Jon and yourself do not share
> this point of view.

Regards -- Markus
From: Robert Brown
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m2tzrhi5yv.fsf@manolo-blahnik.bibliotech.com>
·····································@ANDTHATm-e-leypold.de (Markus E.L. 2)
writes:
> Fixing errors later always is more expensive.

Let's suppose this is true.  In compensation, I never have to wait for the
compiler to digest my code, and when an error is discovered in a deployed
system, I can patch it immediately.  It's hard to say which option is
superior.  I'm sure it depends on the circumstances of the task at hand.  I
will say that I find programming to be more fun when I'm not waiting for
compiles to finish.  Maybe that joy makes me more productive when using a
dynamic language.

bob
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8vdlo$8tn$1@news.xmission.com>
Robert Brown wrote:

> [...] I
> will say that I find programming to be more fun when I'm not waiting for
> compiles to finish.  Maybe that joy makes me more productive when using a
> dynamic language.

Note that the existence of a type system does not preclude interactive 
development via incremental compilation. (One reason I really enjoy 
SML/NJ in particular.)

-thant
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8viqi$f2q$2@aioe.org>
Thant Tessman escreveu:
> Robert Brown wrote:
> 
>> [...] I
>> will say that I find programming to be more fun when I'm not waiting for
>> compiles to finish.  Maybe that joy makes me more productive when using a
>> dynamic language.
> 
> Note that the existence of a type system does not preclude interactive 
> development via incremental compilation. (One reason I really enjoy 
> SML/NJ in particular.)
> 
These are not the same thing. In a dynamic language you change the live 
system.
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8vscl$jsb$1@news.xmission.com>
Cesar Rabak wrote:
> Thant Tessman escreveu:
>> Robert Brown wrote:
>>
>>> [...] I
>>> will say that I find programming to be more fun when I'm not waiting for
>>> compiles to finish.  Maybe that joy makes me more productive when 
>>> using a
>>> dynamic language.
>>
>> Note that the existence of a type system does not preclude interactive 
>> development via incremental compilation. (One reason I really enjoy 
>> SML/NJ in particular.)
>>
> These are not the same thing. In a dynamic language you change the live 
> system.

How is that different?

-thant
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f90d0p$i7u$1@aioe.org>
Thant Tessman escreveu:
> Cesar Rabak wrote:
>> Thant Tessman escreveu:
>>> Robert Brown wrote:
>>>
>>>> [...] I
>>>> will say that I find programming to be more fun when I'm not waiting 
>>>> for
>>>> compiles to finish.  Maybe that joy makes me more productive when 
>>>> using a
>>>> dynamic language.
>>>
>>> Note that the existence of a type system does not preclude 
>>> interactive development via incremental compilation. (One reason I 
>>> really enjoy SML/NJ in particular.)
>>>
>> These are not the same thing. In a dynamic language you change the 
>> live system.
> 
> How is that different?
> 
Say you have an object instantiated in live system with data attributed 
to the vars/fields, in dynamic languages you can change the behavior of 
the object and the data will be kept intact.

Is this what you are calling "incremental compilation"?
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural     language Minim
Date: 
Message-ID: <f91sot$h1h$1@news.xmission.com>
Cesar Rabak wrote:
> Thant Tessman escreveu:
>> Cesar Rabak wrote:
>>> Thant Tessman escreveu:
>>>> Robert Brown wrote:
>>>>
>>>>> [...] I
>>>>> will say that I find programming to be more fun when I'm not 
>>>>> waiting for
>>>>> compiles to finish.  Maybe that joy makes me more productive when 
>>>>> using a
>>>>> dynamic language.
>>>>
>>>> Note that the existence of a type system does not preclude 
>>>> interactive development via incremental compilation. (One reason I 
>>>> really enjoy SML/NJ in particular.)
>>>>
>>> These are not the same thing. In a dynamic language you change the 
>>> live system.
>>
>> How is that different?
>>
> Say you have an object instantiated in live system with data attributed 
> to the vars/fields, in dynamic languages you can change the behavior of 
> the object and the data will be kept intact.
> 
> Is this what are calling "incremental compilation"?

Well then I misunderstood. I would certainly hope you're not patching 
live systems often enough that compile times are your real concern. For 
what it's worth, I'm not convinced static type systems absolutely 
preclude patching live systems. I suspect they could even help make sure 
things don't get screwed up.

-thant
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural     language Minim
Date: 
Message-ID: <f922d2$bem$1@aioe.org>
Thant Tessman escreveu:
> Cesar Rabak wrote:
>> Thant Tessman escreveu:
>>> Cesar Rabak wrote:
>>>> Thant Tessman escreveu:
>>>>> Robert Brown wrote:
>>>>>
>>>>>> [...] I
>>>>>> will say that I find programming to be more fun when I'm not 
>>>>>> waiting for
>>>>>> compiles to finish.  Maybe that joy makes me more productive when 
>>>>>> using a
>>>>>> dynamic language.
>>>>>
>>>>> Note that the existence of a type system does not preclude 
>>>>> interactive development via incremental compilation. (One reason I 
>>>>> really enjoy SML/NJ in particular.)
>>>>>
>>>> These are not the same thing. In a dynamic language you change the 
>>>> live system.
>>>
>>> How is that different?
>>>
>> Say you have an object instantiated in live system with data 
>> attributed to the vars/fields, in dynamic languages you can change the 
>> behavior of the object and the data will be kept intact.
>>
>> Is this what are calling "incremental compilation"?
> 
> Well then I misunderstood. I would certainly hope you're not patching 
> live systems often enough that compile times are your real concern. For 

They're a real concern to keep MTTR as low as possible.

> what it's worth, I'm not convinced static type systems absolutely 
> preclude patching live systems. I suspect they could even help make sure 
> things don't get screwed up.

I don't think either static systems preclude, but I'm not quite sure if 
there exists a language with these features. What I know for sure is 
that Erlang has this live system patching capability, but I'm not an 
expert in Erlang.

It is however a functional language (mm)



> 
> -thant
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <57ir7udacy.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Thant Tessman escreveu:
>> Cesar Rabak wrote:
>>> Thant Tessman escreveu:
>>>> Cesar Rabak wrote:
>>>>> Thant Tessman escreveu:
>>>>>> Robert Brown wrote:
>>>>>>
>>>>>>> [...] I
>>>>>>> will say that I find programming to be more fun when I'm not 
>>>>>>> waiting for
>>>>>>> compiles to finish.  Maybe that joy makes me more productive when 
>>>>>>> using a
>>>>>>> dynamic language.
>>>>>>
>>>>>> Note that the existence of a type system does not preclude 
>>>>>> interactive development via incremental compilation. (One reason I 
>>>>>> really enjoy SML/NJ in particular.)
>>>>>>
>>>>> These are not the same thing. In a dynamic language you change the 
>>>>> live system.
>>>>
>>>> How is that different?
>>>>
>>> Say you have an object instantiated in live system with data 
>>> attributed to the vars/fields, in dynamic languages you can change the 
>>> behavior of the object and the data will be kept intact.
>>>
>>> Is this what are calling "incremental compilation"?
>> 
>> Well then I misunderstood. I would certainly hope you're not patching 
>> live systems often enough that compile times are your real concern. For 
>
> They're real concern to keep MTTR as low as possible.
>
>> what it's worth, I'm not convinced static type systems absolutely 
>> preclude patching live systems. I suspect they could even help make sure 
>> things don't get screwed up.
>
> I don't think either static systems preclude, but I'm not quite sure if 
> there exists a language with these features. What I know for sure is 
> that Erlang has this live system patching capability, but I'm not an 
> expert in Erlang.
>
> It is however a functional language (mm)

I've the impression this is rather theoretical. The only person
posting in this thread that has real experience in live patching seems
to be Ulf Wiger. The others seem to want to make a big thing of it
(because Lisp supposedly can do this, but where it occurs in real life
Erlang seemed to actually be in use), and don't realize that live
patching is not an issue in practically all use cases: Too risky when
compared with other upgrade methods. I doubt that all Lisp-Programmers
are developing systems they need to live patch to keep the MTTR low.

Furthermore, if live patching becomes a development methodology rather
than an upgrade mechanism I start to wonder how those people do
version control and code reviews: It must be really difficult to
recover the current state of code in the image after patching in all
changes?

Regards -- Markus
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f95v6v$nsg$1@aioe.org>
Markus E.L. 2 escreveu:
> 
> Cesar Rabak wrote:
> 
>> Thant Tessman escreveu:
>>> Cesar Rabak wrote:
>>>> Thant Tessman escreveu:
>>>>> Cesar Rabak wrote:
>>>>>> Thant Tessman escreveu:
>>>>>>> Robert Brown wrote:
>>>>>>>
>>>>>>>> [...] I
>>>>>>>> will say that I find programming to be more fun when I'm not 
>>>>>>>> waiting for
>>>>>>>> compiles to finish.  Maybe that joy makes me more productive when 
>>>>>>>> using a
>>>>>>>> dynamic language.
>>>>>>> Note that the existence of a type system does not preclude 
>>>>>>> interactive development via incremental compilation. (One reason I 
>>>>>>> really enjoy SML/NJ in particular.)
>>>>>>>
>>>>>> These are not the same thing. In a dynamic language you change the 
>>>>>> live system.
>>>>> How is that different?
>>>>>
>>>> Say you have an object instantiated in live system with data 
>>>> attributed to the vars/fields, in dynamic languages you can change the 
>>>> behavior of the object and the data will be kept intact.
>>>>
>>>> Is this what are calling "incremental compilation"?
>>> Well then I misunderstood. I would certainly hope you're not patching 
>>> live systems often enough that compile times are your real concern. For 
>> They're real concern to keep MTTR as low as possible.
>>
>>> what it's worth, I'm not convinced static type systems absolutely 
>>> preclude patching live systems. I suspect they could even help make sure 
>>> things don't get screwed up.
>> I don't think either static systems preclude, but I'm not quite sure if 
>> there exists a language with these features. What I know for sure is 
>> that Erlang has this live system patching capability, but I'm not an 
>> expert in Erlang.
>>
>> It is however a functional language (mm)
> 
> I've the impression this is rather theoretical. The only person
> posting in this thread that has real experience in live patching seems
> to be Ulf Wigner. The others seem to want to make a big thing of it
> (because Lisp supposedly can do this, but where it occurs in real life
> Erlang seemd to be in use actually used), and don't realize that live
> patching is not an issue in practically all use cases: Too risky when
> compared with other upgrade methods. I doubt that all Lisp-Programmers
> are developing systems they need to live pathc to keep the MTTR low.

Ah the references war! Please get a look at a reference about Lisp in a 
spacecraft, then: http://www.flownet.com/gat/jpl-lisp.html, if you're in 
haste you can skip most of the material and check the item "1994-2000 - 
Remote Agent".

> 
> Furthermore, if live pathcing becomes a development methodology rather
> than an upgrade mechanism I start to wonder how those people do
> version control and code reviews: It must be really difficult to
> recover the current state of code in the image after patching in all
> changes?

Patching is a technique employed in Application Management, not
development, which has to be accompanied by SCM to avoid the havoc you
mention. Yes, maintaining software that is worth the effort requires
discipline.

Regards,

--
Cesar Rabak
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-5EBAD7.11071906082007@news-europe.giganews.com>
In article <··············@hod.lan.m-e-leypold.de>,
 ·····································@ANDTHATm-e-leypold.de (Markus 
 E.L. 2) wrote:

> Cesar Rabak wrote:
> 
> > Thant Tessman escreveu:
> >> Cesar Rabak wrote:
> >>> Thant Tessman escreveu:
> >>>> Cesar Rabak wrote:
> >>>>> Thant Tessman escreveu:
> >>>>>> Robert Brown wrote:
> >>>>>>
> >>>>>>> [...] I
> >>>>>>> will say that I find programming to be more fun when I'm not 
> >>>>>>> waiting for
> >>>>>>> compiles to finish.  Maybe that joy makes me more productive when 
> >>>>>>> using a
> >>>>>>> dynamic language.
> >>>>>>
> >>>>>> Note that the existence of a type system does not preclude 
> >>>>>> interactive development via incremental compilation. (One reason I 
> >>>>>> really enjoy SML/NJ in particular.)
> >>>>>>
> >>>>> These are not the same thing. In a dynamic language you change the 
> >>>>> live system.
> >>>>
> >>>> How is that different?
> >>>>
> >>> Say you have an object instantiated in live system with data 
> >>> attributed to the vars/fields, in dynamic languages you can change the 
> >>> behavior of the object and the data will be kept intact.
> >>>
> >>> Is this what are calling "incremental compilation"?
> >> 
> >> Well then I misunderstood. I would certainly hope you're not patching 
> >> live systems often enough that compile times are your real concern. For 
> >
> > They're real concern to keep MTTR as low as possible.
> >
> >> what it's worth, I'm not convinced static type systems absolutely 
> >> preclude patching live systems. I suspect they could even help make sure 
> >> things don't get screwed up.
> >
> > I don't think either static systems preclude, but I'm not quite sure if 
> > there exists a language with these features. What I know for sure is 
> > that Erlang has this live system patching capability, but I'm not an 
> > expert in Erlang.
> >
> > It is however a functional language (mm)
> 
> I've the impression this is rather theoretical. The only person
> posting in this thread that has real experience in live patching seems
> to be Ulf Wigner.

That's not true.

> The others seem to want to make a big thing of it
> (because Lisp supposedly can do this, but where it occurs in real life

That's also not true.

> Erlang seemd to be in use actually used), and don't realize that live
> patching is not an issue in practically all use cases: Too risky when
> compared with other upgrade methods. I doubt that all Lisp-Programmers
> are developing systems they need to live pathc to keep the MTTR low.

First you say there is no one in this thread with experience
doing live patching in Lisp. Now you doubt that 'ALL' need it...

Markus, how about:

There are several Lisp applications in the field that are being
live patched.

When a friend of mine deployed his document publishing system (Web-based)
to the "White House" (Clinton and Gore era), he was sitting in
front of the console for several days and while the requests
were coming in he was watching them. Whenever somebody tried
to hack the system, he looked whether something needs to be done
and patched the live system against those 'hacks' until it
could run unattended. It ran for several years at the White House
and was written completely in Lisp.


> 
> Furthermore, if live pathcing becomes a development methodology rather
> than an upgrade mechanism I start to wonder how those people do
> version control and code reviews: It must be really difficult to
> recover the current state of code in the image after patching in all
> changes?
> 
> Regards -- Markus

-- 
http://lispm.dyndns.org
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <a1abt4k7l0.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

>> 
>> I've the impression this is rather theoretical. The only person
>> posting in this thread that has real experience in live patching seems
>> to be Ulf Wigner.
>
> That's a not true.
>
>> The others seem to want to make a big thing of it
>> (because Lisp supposedly can do this, but where it occurs in real life
>
> That's also not true.
>
>> Erlang seemd to be in use actually used), and don't realize that live
>> patching is not an issue in practically all use cases: Too risky when
>> compared with other upgrade methods. I doubt that all Lisp-Programmers
>> are developing systems they need to live pathc to keep the MTTR low.
>
> First you say there is no one in this thread with experience
> doing live patching in Lisp. Now you doubt that 'ALL' need it...

So who has the experience? You? (Certainly not Don, Cesar or Rainer).
And did you need it? I mean at the deployment site? And the system
would not have been doable without the support Lisp provides for live
patching?

Come on -- spill some details (actually I'm not trying to disprove
you, I only had the impression from your contributions that even from
your side the advantages of live patching Lisp systems that have to be
up 24x7 has been brought forward as theoretical not as something you
actually applied. -- BUT I'm always keen on case studies, so if you
can, tell us about it.

>
> Markus, how about:
>
> There are several Lisp applictions in the field that are being
> live patched.
>
> When a friend of mine deployed his document publishing system (Web-based)
> to the "White House" (Clinton and Gore era), he was sitting in
> front of the console for several days and while the requests
> were coming in he was watching them. Whenever somebody tried
> to hack the system, he looked whether something needs to be done
> and patched the live system against those 'hacks' until it
> could run unattended. It ran for several years at the White House
> and was written completely in Lisp.

OK, but it didn't have the requirement of being up 24x7 and I don't see
how hot patching (even if it made life easier) comes in as an absolute
non negotiable requirement. But this was the point where this sub
thread started from: We (Lisp users) can live patch and sometimes that
is absolutely required, see carrier grade and so on: Your example
(which I don't want to devalue as a great project per se) doesn't
strike me as carrier grade.


>> Furthermore, if live pathcing becomes a development methodology rather
>> than an upgrade mechanism I start to wonder how those people do
>> version control and code reviews: It must be really difficult to
>> recover the current state of code in the image after patching in all
>> changes?

And yes, please someone answer this. Personally I hate to see systems
in the wild where nobody can tell me for sure what the source version
is or was and which thus never can be reconstructed elsewhere if a
meteorite or nuclear strike hits the site. 

Regards -- Markus
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-C42FAC.18165507082007@news-europe.giganews.com>
In article <··············@hod.lan.m-e-leypold.de>,
 ·····································@ANDTHATm-e-leypold.de (Markus 
 E.L. 2) wrote:

> > There are several Lisp applictions in the field that are being
> > live patched.
> >
> > When a friend of mine deployed his document publishing system (Web-based)
> > to the "White House" (Clinton and Gore era), he was sitting in
> > front of the console for several days and while the requests
> > were coming in he was watching them. Whenever somebody tried
> > to hack the system, he looked whether something needs to be done
> > and patched the live system against those 'hacks' until it
> > could run unattended. It ran for several years at the White House
> > and was written completely in Lisp.
> 
> OK, but it didn't have the reuirement of being up 24x7 and I don't see
> how hot patching (even if it made live easier) comes in as an absolute
> non negotiable requirement. But this was the point where this sub
> thread started from: We (Lisp users) can live patch and sometimes that
> is absolutely required, see carrier grade and so on: Your example
> (which I don't want to devalue as a great project per se) doesn't
> strike me as carrier grade.

You asked about experiences with live patching. I gave you
one example. If it fits your restrictions is
another case.

> >> Furthermore, if live pathcing becomes a development methodology rather
> >> than an upgrade mechanism I start to wonder how those people do
> >> version control and code reviews: It must be really difficult to
> >> recover the current state of code in the image after patching in all
> >> changes?
> 
> And yes, please someone answer this. Personally I hate to see systems
> in the wild where nobody can tell me for sure what the source version
> is or was and which thus never can be reconstructed elsewhere if a
> meteorite or nuclear strike hits the site. 

The system that had been used at the White House knew about 'systems'
and 'patches' to systems. It shows you which software at what
patch level is loaded and running.

One changes the sources of the software, generates one or more patch
files, checks the patch files for some additionally
necessary adjustments and then the patch file gets loaded.
The patch file knows versions, author, time & date, and other
stuff.

-- 
http://lispm.dyndns.org
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <wawsw7xp6x.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <··············@hod.lan.m-e-leypold.de>,
>  ·····································@ANDTHATm-e-leypold.de (Markus 
>  E.L. 2) wrote:
>
>> > There are several Lisp applictions in the field that are being
>> > live patched.
>> >
>> > When a friend of mine deployed his document publishing system (Web-based)
>> > to the "White House" (Clinton and Gore era), he was sitting in
>> > front of the console for several days and while the requests
>> > were coming in he was watching them. Whenever somebody tried
>> > to hack the system, he looked whether something needs to be done
>> > and patched the live system against those 'hacks' until it
>> > could run unattended. It ran for several years at the White House
>> > and was written completely in Lisp.
>> 
>> OK, but it didn't have the reuirement of being up 24x7 and I don't see
>> how hot patching (even if it made live easier) comes in as an absolute
>> non negotiable requirement. But this was the point where this sub
>> thread started from: We (Lisp users) can live patch and sometimes that
>> is absolutely required, see carrier grade and so on: Your example
>> (which I don't want to devalue as a great project per se) doesn't
>> strike me as carrier grade.
>
> You asked about experiences with live patching. I gave you
> one example. If it fits your restrictions is
> another case.

Forgive. Perhaps it wasn't you, but someone here argued with the
_necessity_ (not only the utility) of hot patching in carrier grade
systems. To which I answered, that I consider this part of the
discussion rather theoretical.

>
>> >> Furthermore, if live pathcing becomes a development methodology rather
>> >> than an upgrade mechanism I start to wonder how those people do
>> >> version control and code reviews: It must be really difficult to
>> >> recover the current state of code in the image after patching in all
>> >> changes?
>> 
>> And yes, please someone answer this. Personally I hate to see systems
>> in the wild where nobody can tell me for sure what the source version
>> is or was and which thus never can be reconstructed elsewhere if a
>> meteorite or nuclear strike hits the site. 
>
> The system that had been used at the White House knew about 'systems'
> and 'patches' to systems. It shows you which software at what
> patch level is loaded and running.

>
> One changes the sources of the software, generates one or more patch
> files, checks the patch files for some additionally
> necessary adjustments and then the patch file gets loaded.
> The patch file knows versions, author, time & date, and other
> stuff.

This is very nice, but not the kind of interactivity others (I think
including you) suggested at the beginning: You still work from source
and I'd even go as far as saying that "loading the patch" is hardly
more than incremental compilation (done on the running system,
admittedly).

Regards -- Markus
From: Ulf Wiger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <xczps1r5c74.fsf@cbe.ericsson.se>
>>>>> "M" == Markus E L 2 <·····································@ANDTHATm-e-leypold.de> writes:

  >>>  Furthermore, if live pathcing becomes a development methodology
  >>>  rather than an upgrade mechanism I start to wonder how those
  >>>  people do version control and code reviews: It must be really
  >>>  difficult to recover the current state of code in the image
  >>>  after patching in all changes?

  M> And yes, please someone answer this. Personally I hate to see
  M> systems in the wild where nobody can tell me for sure what the
  M> source version is or was and which thus never can be
  M> reconstructed elsewhere if a meteorite or nuclear strike hits the
  M> site.

I would too.

I have defended the use of live patching, but mainly for cutting 
lead time when diagnosing problems, and for small changes that don't
warrant restarts.

We never apply patches to live systems that are not digitally 
signed, generated from our own build system and fully traceable.

I wish we could also say that we know for sure that a patched
system is fully equivalent to a system installed from scratch
with the same software version, or e.g. that a system that was
rolled back from an unwanted/failed upgrade is equivalent to 
a system where no upgrade had been attempted to begin with.
We're approaching the point where I think we can claim this
with considerable confidence, but that's obviously different
from knowing it to be true.

BR,
Ulf W
-- 
Ulf Wiger, Senior Specialist,
   / / /   Architecture & Design of Carrier-Class Software
  / / /    Team Leader, Software Characteristics
 / / /     Ericsson AB, IMS Gateways
From: Rob Warnock
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <d7OdnVG_ee5VsyXbnZ2dnUVZ_u2mnZ2d@speakeasy.net>
Rainer Joswig  <······@lisp.de> wrote:
+---------------
| There are several Lisp applictions in the field that are being
| live patched.
+---------------

I personally support several web sites which include a persistent
Common Lisp-based (CMUCL) application server as part of the site,
and have on numerous occasions "live patched" the production server
in preference to stopping/restarting it. [Yes, the patches had all
been previously applied to a test server, so the risk was quite small.]

Note that the Lisp application server on these systems stays up
"forever" [so far, knock on wood!] unless the underlying system
gets rebooted for some reason. [The Linux system that's at a co-lo
tends to get rebooted once a week "whether it needs it or not",
due to their use of RHEL's "auto-update" process. Others (mostly
FreeBSD-based) have uptimes in months to years...]

+---------------
| Markus wrote:
| > Furthermore, if live patching becomes a development methodology rather
| > than an upgrade mechanism I start to wonder how those people do
| > version control and code reviews: It must be really difficult to
| > recover the current state of code in the image after patching in all
| > changes?
+---------------

Bull. Live patching is *NOT* incompatible with good source code control.

Moreover, "live patching as a development methodology" is, IMHO,
*the* way to go for rapid deployment of web sites. I couldn't have
developed the apps I did in the small amount of time I did had I
not done it that way. Multiple windows open: a browser, a "tail -f"
of the Apache error log, an "attachtty" window to a listener/REPL
in the running server, and a window per file being edited. During
development, each page hit did an ASDF:LOAD-OP on the associated
server subsystem(s), so the development cycle was: (1) Edit whichever
files needed it; (2) Do a "save" on changed files; (3) Hit the
"Reload" [or "Refresh", for you MS users] button on the browser.
Voila! The new code gets compiled & loaded & used to serve up the
reloaded page.


-Rob

-----
Rob Warnock			<····@rpw3.org>
627 26th Avenue			<URL:http://rpw3.org/>
San Mateo, CA 94403		(650)572-2607
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <3gejif1fuk.fsf@hod.lan.m-e-leypold.de>
'rpw3 AT rpw3 DOT org (Rob Warnock)' wrote:

> Rainer Joswig  <······@lisp.de> wrote:
> +---------------
> | There are several Lisp applictions in the field that are being
> | live patched.
> +---------------
>
> I personally support several web sites which include a persistent
> Common Lisp-based (CMUCL) application server as part of the site,
> and have on numerous occasions "live patched" the production server
> in preference to stopping/restarting it. [Yes, the patches had all
> been previously applied to a test server, so the risk was quite small.]
>
> Note that the Lisp application server on these systems stays up
> "forever" [so far, knock on wood!] unless the underlying system
> gets rebooted for some reason. [The Linux system that's at a co-lo
> tends to get rebooted once a week "whether it needs it or not",
> due to their use of RHEL's "auto-update" process. Others (mostly
> FreeBSD-based) have uptimes in months to years...]
>
> +---------------
> | Markus wrote:
> | > Furthermore, if live patching becomes a development methodology rather
> | > than an upgrade mechanism I start to wonder how those people do
> | > version control and code reviews: It must be really difficult to
> | > recover the current state of code in the image after patching in all
> | > changes?
> +---------------
>
> Bull. Live patching is *NOT* incompatible with good source code control.

So, how is it done then?

> I couldn't have
> developed the apps I did in the small amount of time I did had I
> not done it that way. Multiple windows open: a browser, a "tail -f"
> of the Apache error log, an "attachtty" window to a listener/REPL
> in the running server, and a window per file being edited. During
> development, each page hit did an ASDF:LOAD-OP on the associated
> server subsystem(s), so the development cycle was: (1) Edit whichever
> files needed it; (2) Do a "save" on changed files; (3) Hit the
> "Reload" [or "Refresh", for you MS users] button on the browser.
> Voila! The new code gets compiled & loaded & used to serve up the
> reloaded page.

Does look very similar to edit-compile-test. So where is the advantage?

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bhr10hgau73a0@corp.supernews.com>
Thant Tessman wrote:
> Well then I misunderstood. I would certainly hope you're not patching
> live systems often enough that compile times are your real concern. For
> what it's worth, I'm not convinced static type systems absolutely
> preclude patching live systems.

Yes. The type system is basically irrelevant here as it would only prevent
you from trying to run broken code. However, you may want to be able to
garbage collect code as well as data. OCaml does not support this AFAIK. F#
does. I assume Lisps all do?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8tp4u$k4s$2@online.de>
Markus E.L. 2 schrieb:
> Fixing errors later always is more expensive.

Only if the developer has moved off to another part of the system in the 
mean time.
It can be useful to ignore type errors for a day or two. E.g. if you 
need to try out a few alternatives for a very specific case, don't care 
that this breaks other parts of the system for now (since you're going 
to rewrite the thing from scratch anyway once you have done your 
preliminary exploration).
It's not what one does on a day-to-day basis, but it can be occasionally 
useful.

If you really need to get all type errors fixed, you can always run the 
compiler with a "type warnings as errors" flag, I'd say.

Regards,
Jo
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <w9643xjxxg.fsf@hod.lan.m-e-leypold.de>
Joachim Durchholz wrote:

> Markus E.L. 2 schrieb:
>> Fixing errors later always is more expensive.
>
> Only if the developer has moved off to another part of the system in
> the mean time.

No, I think it generally is. Of course it depends on how much later,
but it turns out that even 2 days later the developer already has lost
enough context that it simply takes longer to understand where that
error came from, which change toggled the error and so on.

> It can be useful to ignore type errors for a day or two. E.g. if you

Brr. Only the thought. :-). Fortunately it is not necessary. If one
develops data driven, one starts with designing the data anyway :-).

> need to try out a few alternatives for a very specific case, don't
> care that this breaks other parts of the system for now 

This is what I use modularization for: Modules are put into a test
harness during experimental changes and if the changes work, the
module is reintegrated with the rest of the system. Type errors don't
occur in the rest of the system during experimental development, since
most of the modules are not compiled with the changed module during
that time. Of course version control and branching helps to put that
method to work.

> (since you're
> going to rewrite the thing from scratch anyway once you have done your
> preliminary exploration).
> It's not what one does on a day-to-day basis, but it can be
> occasionally useful.

> If you really need to get all type errors fixed, you can always run
> the compiler with a "type warnings as errors" flag, I'd say.

No. I simply don't like it :-).

Regards -- Markus

PS: And I even program C with -Wall in the compiler and eliminate all
    warnings all the time if I can help it.
From: Ulf Wiger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <xczmyx96r4y.fsf@cbe.ericsson.se>
>>>>> "MEL" == Markus E L 2 <·····································@ANDTHATm-e-leypold.de> writes:

  MEL> Joachim Durchholz wrote:

  >>  Markus E.L. 2 schrieb:
  >>>  Fixing errors later always is more expensive.
  >>
  >>  Only if the developer has moved off to another part of the
  >>  system in the mean time.

  MEL> No, I think it generally is. Of course it depends on how much
  MEL> later, but it turns out that even 2 days later the developer
  MEL> already has lost enough context that it simply takes longer to
  MEL> understand where that error came from, which changed toggled
  MEL> the error and so on.

I agree (as would many people in industry) that fixing errors later
is more expensive. The rule of thumb is that the cost of fixing 
an error increases by an order of magnitude for each stage in the
production chain:

- design
- system test
- commercial operation

But there was a subtle distinction in a previous post: "fix it 
later, or there may be no later" (from memory). This is also
important. There is a constant tradeoff when trying to beat your
competitors to the market. Here, the rule of thumb is "It's better
to be first, than it is to be better". In general, the vendor who
reaches the market first, has the best chance of capturing it, IF
the product is Good Enough(tm).

So, if you spend too much time getting your initial design right,
you risk ending up with a wonderful but unsellable product. OTOH,
if you are too sloppy, you risk having your product rejected by
the customer, or seeing all your profits consumed by maintenance.

To make matters worse, there often isn't anything like a formal
specification - or if there is, it is likely to be annoyingly
informal, and faulty at that.

For the record, I'm not convinced that initial development time
has to be shorter with ML or Haskell than with a dynamically typed
language - it might well be the other way around in principle.

I happen to be using a dynamically typed language - Erlang.
The fact that it's dynamically typed is in part a historical
accident, and if I've read my history right, it could well
have been built on top of Concurrent ML rather than on Prolog.
Concurrent ML and Erlang were developed roughly in parallel,
drawing more or less on the CSP model of concurrency, but the
teams may not have been aware of each other, at least in the
early stages. The overall design goals were different, in that
the Erlang team were really looking for good ways to design
the next generation of telecom systems, and were not interested
in language design per se. The ML team was interested in how
to add concurrency to ML without breaking type safety.

The thing that makes Erlang successful in our realm are, IMO:

- The combination of declarative style and share-nothing,
  CSP-style concurrency.
- An intuitive and consistent approach to fault tolerance.
- The fact that it is easy to get into, and relatively easy
  to program even without advanced knowledge of types, FP,
  or indeed concurrency. (:
- Support libraries that make up the best telecoms middleware
  in existence (again IMO).

While I also have opinions about how we should go about writing
software, we've been fairly successful using Erlang in a way
that is familiar to our colleagues, using roughly the same 
type of methodology and the same type of designers. Once we had
accepted Erlang as a very productive tool, we could adapt the 
design process to become even more productive.

It could be that in doing this, we're also helping to prepare
for the introduction of other powerful languages as well.

I hope so.


BR,
Ulf W
-- 
Ulf Wiger, Senior Specialist,
   / / /   Architecture & Design of Carrier-Class Software
  / / /    Team Leader, Software Characteristics
 / / /     Ericsson AB, IMS Gateways
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8tovp$k4s$1@online.de>
Not that I'm adverse to what you write in general, but:

Olivier Drolet schrieb:
> Because some type errors might not be errors?

With the right type-friendly design, this kind of error is practically 
unheard of in languages like Haskell or SML.
(For languages with comparatively weak type systems, such as C++ and 
Java, run-time type checking can indeed become ubiquitous, but that 
invalidates just the C++/Java approach of handling static typing, it 
does not invalidate static typing in general.)

Regards,
Jo
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186141025.563741.30380@o61g2000hsh.googlegroups.com>
On 2 Aug., 20:43, Olivier Drolet <·······@mac.com> wrote:
> On Aug 2, 11:22 am, Ingo Menger <···········@consultant.com> wrote:

> > Furthermore, when you agree that type errors are to be avoided, why
> > then do you insist on being able to "run" faulty code in the first
> > place?
>
> Because some type errors might not be errors?

This is impossible.
It is possible, however, that the type system is too narrow, that
there is no way to implement the types you need in the language.
Granted, this is, of course, a good reason not to use that language
(for that task).

> And because it is more
> convenient to fix them later? I.e., fixing them now has a cost?

As others have pointed out, it is more likely that fixing it later
will be more costly. This is particularly true when the type error
hints at a design flaw.

> > The only justification for languages without strong static typing
> > would be the following: The type systems of existing SST-languages do
> > not allow certain classes of programs to be written.
>
> Again, how about convenience, as a justification?

As a former perl programmer I understand what you mean.
I still find it fun to write a quick and dirty script. But only a few
screens, not more.
But lately (maybe it's the age?), I got into the habit of wearing the
safety belt every time before I start the motor of my car. Few
people know how the face of an unbelted passenger or driver may look
after a crash at only 30 km/h ... you know.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b22aba$0$1593$ed2619ec@ptn-nntp-reader02.plus.net>
Ingo Menger wrote:
> The only justification for languages without strong static typing
> would be the following: The type systems of existing SST-languages do
> not allow certain classes of programs to be written. Therefore, in
> those cases, one has to resort to other languages.
> I am not sure whether this claim would be defendable now or in the
future, it surely was true when the type system of PASCAL was in
> fashion.

Static typing ceases to be beneficial at run-time boundaries such as remote
procedure calls. So dynamic languages can have advantages when interfacing.
A SOAP call in Python might be:

  >>> server.getTemp('90210')

In OCaml, you'd have to box all dynamically typed values (in this case the
argument to the RPC):

  # server "getTemp" (`String "90210");;

which is longer than the equivalent Python.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186141437.047474.192490@d55g2000hsg.googlegroups.com>
On 2 Aug., 20:55, Jon Harrop <····@ffconsultancy.com> wrote:
> Ingo Menger wrote:
> > The only justification for languages without strong static typing
> > would be the following: The type systems of existing SST-languages do
> > not allow certain classes of programs to be written. Therefore, in
> > those cases, one has to resort to other languages.
> > I am not sure whether this claim would be defendable now or in the
> > future, it surely was true when the type system of PASCAL was in
> > fashion.
>
> Static typing ceases to be beneficial at run-time boundaries such as remote
> procedure calls. So dynamic languages can have advantages when interfacing.
> A SOAP call in Python might be:
>
>   >>> server.getTemp('90210')
>
> In OCaml, you'd have to box all dynamically typed values (in this case the
> argument to the RPC):
>
>   # server "getTemp" (`String "90210");;
>
> which is longer than the equivalent Python.

Sure, but this particular example is a practical problem only.
One can imagine a tool that reads a WSDL and creates type safe glue
code in ML or whatever, exposing only
  getTemp :: Server -> String -> Result
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8tuma$qv0$1@aioe.org>
Ingo Menger escreveu:
> On 2 Aug., 15:24, Olivier Drolet <·······@mac.com> wrote:
>> On Aug 2, 4:42 am, Ingo Menger <···········@consultant.com> wrote:
>>
>>> The point is that one day one has to pay for certain kinds of
>>> flexibility ...
>>> The need for testing is, of course, the higher the more dynamic the
>>> language.
>> Unit testing should trap all the type-checking errors, as well as all
>> the other errors a type-checking system can't verify, right?
> 
> Trapping the error and finding out where it originates and why and
> under which circumstances is not the same. As I pointed out in another
> posting, this may make the temptation to quick-dirty fix it
> irresistible.

OTOH, if your technology makes a quick fix impossible, it will be ruled 
out for systems where the uptime has to be kept with a lot of nines 
after the dot and mean time to repair is given in minutes.

> 
>> And best
>> practices suggest you should perform unit testing for any software
>> worthy of consideration, right? (Well, maybe not all software, but
>> most non-trivial code...) So, if you're going to write a test suite
>> anyway, the penalty for using a dynamic language may be quite small,
>> don't you think?
> 
> No, for the reasons above.
> Furthermore, when you agree that type errors are to be avoided, why
> then do you insist on being able to "run" faulty code in the first
> place? 

Because for some type of business a faulty system in a specific part of 
the application may still have enough value to be worth running?
From: Stefan Nobis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m2hcngcwix.fsf@snobis.de>
Cesar Rabak <·······@yahoo.com.br> writes:

> OTOH, if your technology makes a quick fix impossible, it will be
> ruled out for systems where the uptime has to be kept with a lot of
> nines after the dot and mean time to repair is given in minutes.

But that's not the important point: It's not that hard to get the
deployment right. The development cost is the main issue:

Assume a GUI database client. It may take you 10 minutes to get to the
point you try to test/experiment with. If you have to edit-compile-run
cycle 10 times, that's 100 wasted minutes as compared to a more
dynamic development environment like Lisp.

-- 
Stefan.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xlkcsz78v.fsf@ruckus.brouhaha.com>
Stefan Nobis <······@gmx.de> writes:
> Assume a GUI database client. It may take you 10 minutes to get to the
> point you try to test/experiment with. If you have to edit-compile-run
> cycle 10 times, that's 100 wasted minutes as compared to a more
> dynamic development environment like Lisp.

I just don't see a big problem, given reasonable debugging
environments, even in horribly bureaucratic languages like Java.  It
does seem to me that functional language implementations tend not to
have as many tools like debuggers, and I'm not sure why that is.
But I don't see any obstacles to using something like ghci the way
you're describing using a Lisp read-eval-print loop.  
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f90d61$i7u$3@aioe.org>
Paul Rubin escreveu:
> Stefan Nobis <······@gmx.de> writes:
>> Assume a GUI database client. It may take you 10 minutes to get to the
>> point you try to test/experiment with. If you have to edit-compile-run
>> cycle 10 times, that's 100 wasted minutes as compared to a more
>> dynamic development environment like Lisp.
> 
> I just don't see a big problem, given reasonable debugging
> environments, even in horribly bureaucratic languages like Java.  It
> does seem to me that functional language implementations tend not to
> have as many tools like debuggers, and I'm not sure why that is.
> But I don't see any obstacles to using something like ghci the way
> you're describing using a Lisp read-eval-print loop.  
The only /perhaps/ is that you have to leave the ghci running in production.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <w7fy2zzgb9.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Paul Rubin escreveu:
>> Stefan Nobis <······@gmx.de> writes:
>>> Assume a GUI database client. It may take you 10 minutes to get to the
>>> point you try to test/experiment with. If you have to edit-compile-run
>>> cycle 10 times, that's 100 wasted minutes as compared to a more
>>> dynamic development environment like Lisp.
>> I just don't see a big problem, given reasonable debugging
>> environments, even in horribly bureaucratic languages like Java.  It
>> does seem to me that functional language implementations tend not to
>> have as many tools like debuggers, and I'm not sure why that is.
>> But I don't see any obstacles to using something like ghci the way
>> you're describing using a Lisp read-eval-print loop.
> The only /perhaps/ is that you have to leave the ghci running in production.

But I also doubt one really would want to introduce unreviewed code in
a production system, esp. not "just for testing".

Regards -- Markus
From: Stefan Nobis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m24pjgcmpj.fsf@snobis.de>
Paul Rubin <·············@NOSPAM.invalid> writes:

> I just don't see a big problem, given reasonable debugging
> environments, even in horribly bureaucratic languages like Java.

Uh? Maybe I missed something but in Java it's not possible to change
the code at runtime like adding some attributes to a class, redefining
some methods on the fly and the like.

> But I don't see any obstacles to using something like ghci the way
> you're describing using a Lisp read-eval-print loop.  

So with ghci I would be able to do the things above?

-- 
Stefan.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xodhop8jg.fsf@ruckus.brouhaha.com>
Stefan Nobis <······@gmx.de> writes:
> > I just don't see a big problem, given reasonable debugging
> > environments, even in horribly bureaucratic languages like Java.
> Uh? Maybe I missed something but in Java it's not possible to change
> the code at runtime like adding some attributes to a class, redefining
> some methods on the fly and the like.

I believe you can reload class files.  I don't know how that affects
existing instances.

> > But I don't see any obstacles to using something like ghci the way
> > you're describing using a Lisp read-eval-print loop.  
> So with ghci I would be able to do the things above?

Same thing, you can redefine stuff but I don't know what happens
to existing objects.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <d1lkcrzgdi.fsf@hod.lan.m-e-leypold.de>
Stefan Nobis wrote:

> Paul Rubin <·············@NOSPAM.invalid> writes:
>
>> I just don't see a big problem, given reasonable debugging
>> environments, even in horribly bureaucratic languages like Java.
>
> Uh? Maybe I missed something but in Java it's not possible to change
> the code at runtime like adding some attributes to a class, redefining
> some methods on the fly and the like.

But it is possible to pinpoint the source of the error (the defective
piece of code) and then put that into a test setup ("demonstration
program") that does not require rebuilding a complete database client.

>> But I don't see any obstacles to using something like ghci the way
>> you're describing using a Lisp read-eval-print loop.  
>
> So with ghci I would be able to do the things above?

Not really. Identifiers are not bound dynamically AFAIK.

Regards -- Markus
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f90d4c$i7u$2@aioe.org>
Stefan Nobis escreveu:
> Cesar Rabak <·······@yahoo.com.br> writes:
> 
>> OTOH, if your technology makes a quick fix impossible, it will be
>> ruled out for systems where the uptime has to be kept with a lot of
>> nines after the dot and mean time to repair is given in minutes.
> 
> But that's not the important point: It's not that hard to get the
> deployment right. The development cost is the main issue:
> 
> Assume a GUI database client. It may take you 10 minutes to get to the
> point you try to test/experiment with. If you have to edit-compile-run
> cycle 10 times, that's 100 wasted minutes as compared to a more
> dynamic development environment like Lisp.
> 
Yes, you emphasized the point — I was too synthetic.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <u6ps23zggp.fsf@hod.lan.m-e-leypold.de>
Stefan Nobis wrote:

> Cesar Rabak <·······@yahoo.com.br> writes:
>
>> OTOH, if your technology make a quick fix impossible, it will be
>> ruled out for systems where the uptime has to be kept with a lot of
>> nines after the dot and mean time repair is given in minutes.
>
> But that's not the important point: It's not that hard to get the
> deployment right. The development cost is the main issue:
>
> Assume a GUI database client. It may take you 10 minutes to get to the
> point you try to test/experiment with. If you have to edit-compile-run
> cycle 10 times, that's 100 wasted minutes as compared to a more
> dynamic development environment like Lisp.

But if you have to rebuild a complete application to test a piece of
code there is something wrong with (a) your development process
(you're missing infrastructure to do simple unit tests in a
lightweight fashion) and (b) your build system (you're missing
incremental compilation).

Regards -- Markus
From: Stefan Nobis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m2tzrepe9j.fsf@snobis.de>
·····································@ANDTHATm-e-leypold.de (Markus
E.L. 2) writes:

> Stefan Nobis wrote:

>> Assume a GUI database client. It may take you 10 minutes to get to the
>> point you try to test/experiment with. If you have to edit-compile-run
>> cycle 10 times, that's 100 wasted minutes as compared to a more
>> dynamic development environment like Lisp.

> But if you have to rebuild a complete application to test a piece of
> code there is something wrong with (a) your development process
> (you're missing infrastructure to do simple unit tests in a
> lightweight fashion) and (b) your build system (you're missing
> incremental compilation).

Unit tests can't test everything, there are (more time consuming)
integration and system tests. If you reshape some of the very
fundamental classes, I think incremental compilation doesn't help very
much and sometimes you need to experiment with the look and feel of a
GUI (and sometimes it's a bit time consuming to reach the
experimentation area, because of initialization, some complex DB
queries and the like).

-- 
Stefan.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <bwr6mgk8ir.fsf@hod.lan.m-e-leypold.de>
Stefan Nobis wrote:

> ·····································@ANDTHATm-e-leypold.de (Markus
> E.L. 2) writes:
>
>> Stefan Nobis wrote:
>
>>> Assume a GUI database client. It may take you 10 minutes to get to the
>>> point you try to test/experiment with. If you have to edit-compile-run
>>> cycle 10 times, that's 100 wasted minutes as compared to a more
>>> dynamic development environment like Lisp.
>
>> But if you have to rebuild a complete application to test a piece of
>> code there is something wrong with (a) your development process
>> (you're missing infrastructure to do simple unit tests in a
>> lightweight fashion) and (b) your build system (you're missing
>> incremental compilation).
>
> Unit tests can't test everything, there are (more time consuming)

You might note that I never said that. Of course if you keep the
contract of a modular unit (in typed languages represented by the
interface plus a behavioural specification), no integration tests have
to be done in most application scenarios and the contract can be
tested by unit tests again.

Modularity is all about doing QA w/o having to rebuild whole
applications.

> integration and system tests. If you reshape some of the very

So that is what you do immediately after changing a data type and
before propagating (note that this was what we just have been
discussing)

> fundamental classes, I think incremental compilition doesn't help very
> much 

No, it doesn't. That's why there are unit tests: The core of
fundamental classes is by their very nature small. So we just wrap the
unit tests around them and have a small set of tests to compile
instead of a large application. That catches 95% of the errors and so
vastly reduces the turnaround time you've been stipulating.

And one doesn't change fundamental classes in a large application
every day. And then there are changes that are just extension (add a
selector), which don't even require much testing (since the model
represented by the classes is upwards compatible). so we very probably
have to rebuild only once, not 10 times.

> and sometimes you need to experiment with the look and feel of a
> GUI (and sometimes it's a bit time consuming to reach the
> experimentation area, because of initalization, some complex DB
> queries and the like).

Yes, I usually do this with the empty GUI dummy. Even with a test
drive which only opens a limited number of windows for testing. Of
course that requires that one separates GUI and application core to
certain extent.

And a last aspect: Since programming is a well paid activity, I don't
consider it wrong to buy the largest and fastest build system one can
reasonably get, IF turnaround time is a problem.

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bhht534ehjl0e@corp.supernews.com>
Stefan Nobis wrote:
> Unit tests can't test everything, there are (more time consuming)
> integration and system tests. If you reshape some of the very
> fundamental classes, I think incremental compilition doesn't help very
> much and sometimes you need to experiment with the look and feel of a
> GUI (and sometimes it's a bit time consuming to reach the
> experimentation area, because of initalization, some complex DB
> queries and the like).

Many static languages can do this as well. F# is particularly good for
interactive GUI development, for example.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bhht32qrfbv0d@corp.supernews.com>
Stefan Nobis wrote:
> Assume a GUI database client. It may take you 10 minutes to get to the
> point you try to test/experiment with. If you have to edit-compile-run
> cycle 10 times, that's 100 wasted minutes as compared to a more
> dynamic development environment like Lisp.

or like many static languages (OCaml, Haskell, F#).

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b22333$0$1611$ed2619ec@ptn-nntp-reader02.plus.net>
Olivier Drolet wrote:
> Unit testing should trap all the type-checking errors, as well as all
> the other errors a type-checking system can't verify, right?

If you mean run-time type checking, yes. If you mean static type checking,
no. Statically type checking code is a limited form of proof. Testing does
not displace mathematical proof, of course. Just as plugging numbers into
Fermat's last theorem gives you confidence but doesn't prove anything.

> And best practices sugggest you should perform unit testing for any
> software worthy of consideration, right? 

You should certainly test all code but not necessarily with unit tests.

> (Well, maybe not all software, but 
> most non-trivial code...) So, if you're going to write a test suite
> anyway, the penalty for using a dynamic language may be quite small,
> don't you think?

If all other things were equal, that would be a valid conclusion. However,
all other things are far from equal. Programming style is different in
dynamic and static programs, so the programs themselves are different.
Because of the overlap between testing and proving, the tests applied to
statically typed programs are wildly different to those applied to
dynamically typed programs.

So when you say "you're going to write a test suite" you are assuming that
the test suite would be the same for a dynamic or static program, which is
not correct.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8tv6k$qv0$2@aioe.org>
Jon Harrop escreveu:
[snipped]

> So when you say "you're going to write a test suite" you are assuming that
> the test suite would be the same for a dynamic or static program, which is
> not correct.
> 

This looks like nonsense to me. Tests should prove that business 
requirements are met or not.
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186142059.786193.70610@w3g2000hsg.googlegroups.com>
On 3 Aug., 03:09, Cesar Rabak <·······@yahoo.com.br> wrote:
> Jon Harrop escreveu:
> [snipped]
>
> > So when you say "you're going to write a test suite" you are assuming that
> > the test suite would be the same for a dynamic or static program, which is
> > not correct.
>
> This looks like a non sense to me. Test should prove that business
> requirements are met or no.

But then, you don't catch type errors at all, as has been claimed here
earlier.

I can easily test that a function computes the annualized interest or
whatever given an amount and an interest rate. But there is no
guarantee, that this function will never be called with arguments of a
wrong type. As Jon Harrop correctly pointed out, the type system does
just that (among other things) and relieves you from caring about the
cases you don't think about in your wildest horror dreams.
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8vaqu$omp$1@aioe.org>
Ingo Menger escreveu:
> On 3 Aug., 03:09, Cesar Rabak <·······@yahoo.com.br> wrote:
>> Jon Harrop escreveu:
>> [snipped]
>>
>>> So when you say "you're going to write a test suite" you are assuming that
>>> the test suite would be the same for a dynamic or static program, which is
>>> not correct.
>> This looks like a non sense to me. Test should prove that business
>> requirements are met or no.
> 
> But then, you don't catch type errors at all, as has been claimed here
> earlier.
> 
> I can easily test that a function computes the annualized interest or
> whatever given an amount and an interest rate. But there is no
> guarantee, that this function will never be called with arguments of a
> wrong type. As Jon Harrop correctly pointed out, the type system does
> just that (among other things) and reliefs you from caring about the
> cases you don't think about in your wildest horror dreams.
> 
> 
I think you miss the whole point of unit tests here: _if_ in the 
particular technology you're using this is a real _business_ risk, you 
put one or two tests for this scenario and be sure it comes out green.

Again, what it seems prevalent in this very long thread IMNHO is the 
idea of the existence *of a single way to go* in programming.

People at c.l.lisp have chosen Lisp because of the perceived advantages, 
so all these emotional phrases "reliefs you..." or "...wildest horror 
dreams." don't add to the dialogue.
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186150157.777572.44220@22g2000hsm.googlegroups.com>
On 3 Aug., 15:34, Cesar Rabak <·······@yahoo.com.br> wrote:
> Ingo Menger escreveu:
>
>
>
> > On 3 Aug., 03:09, Cesar Rabak <·······@yahoo.com.br> wrote:
> >> Jon Harrop escreveu:
> >> [snipped]
>
> >>> So when you say "you're going to write a test suite" you are assuming that
> >>> the test suite would be the same for a dynamic or static program, which is
> >>> not correct.
> >> This looks like a non sense to me. Test should prove that business
> >> requirements are met or no.
>
> > But then, you don't catch type errors at all, as has been claimed here
> > earlier.
>
> > I can easily test that a function computes the annualized interest or
> > whatever given an amount and an interest rate. But there is no
> > guarantee, that this function will never be called with arguments of a
> > wrong type. As Jon Harrop correctly pointed out, the type system does
> > just that (among other things) and reliefs you from caring about the
> > cases you don't think about in your wildest horror dreams.
>
> I think you miss the whole point of unit tests here: _if_ in the
> particular technology you're using this is a real _business_ risk, you
> put one or two tests for this scenario and be sure it comes out green.

Sorry, but I have the impression that you don't understand.
How do you know if it is a *business* risk?
Or better, when is calling a function with wrong arguments *not a
business* risk? Perhaps when the application that does that has no
relevance whatsoever.
And how can you be sure, by unit testing one function, that it will
not get called with wrong arguments? For that, you had to test the
calling code!

> Again, what it seems prevalent in this very long thread IMNHO is the
> idea of the existence *of a single way to go* in programming.

Not at all.
I have posted quite a few articles here where I admit that using
strongly typed languages, imperfect as they are, may not be the
optimal solution.
OTOH, it strikes me that people still don't understand that unit tests
are in no way a replacement for a type system.

> People at c.l.lisp have chosen Lisp because of the perceived advantages,
> so all these emotional phrases "reliefs you..." or "...wildest horror
> dreams." don't add to the dialogue.

It may be that where you live this would count as emotional phrases. I
ask you kindly to respect my cultural background, where such phrases
are only there to make the discussion more interesting, natural
language wise.
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8viog$f2q$1@aioe.org>
Ingo Menger escreveu:
> On 3 Aug., 15:34, Cesar Rabak <·······@yahoo.com.br> wrote:
>> Ingo Menger escreveu:
>>
>>
>>
>>> On 3 Aug., 03:09, Cesar Rabak <·······@yahoo.com.br> wrote:
>>>> Jon Harrop escreveu:
>>>> [snipped]
>>>>> So when you say "you're going to write a test suite" you are assuming that
>>>>> the test suite would be the same for a dynamic or static program, which is
>>>>> not correct.
>>>> This looks like a non sense to me. Test should prove that business
>>>> requirements are met or no.
>>> But then, you don't catch type errors at all, as has been claimed here
>>> earlier.
>>> I can easily test that a function computes the annualized interest or
>>> whatever given an amount and an interest rate. But there is no
>>> guarantee, that this function will never be called with arguments of a
>>> wrong type. As Jon Harrop correctly pointed out, the type system does
>>> just that (among other things) and reliefs you from caring about the
>>> cases you don't think about in your wildest horror dreams.
>> I think you miss the whole point of unit tests here: _if_ in the
>> particular technology you're using this is a real _business_ risk, you
>> put one or two tests for this scenario and be sure it comes out green.
> 
> Sorry, but I have the impression that you don't understand.
> How do you know if it is a *business* risk?
> Or better, when is calling a function with wrong arguments *not a
> business* risk? Perhaps when the application that does that has no
> relevance whatsoever.

It is not a business risk if the application is designed so that the 
wrong type will not happen by design. Or are we talking about an app that 
has to read random chunks of data from unknown sources all the time?

> And how can you be sure, by unit testing one function, that it will
> not get called with wrong arguments? For that, you had to test the
> calling code!

Of course you have to test all the calling code (or have it certified 
like code in libraries) and after that system testing, regression 
testing, integration tests and delivery testing. Or your claims include 
the exclusion of these tests as well?

No type system will be enough, because in a non-toy program you'll 
need to convert data from one format to another and/or (in languages that 
allow it) coerce data from one type to another, etc. This makes all this 
comfort about type checking less important on the whole in large 
applications.

> 
>> Again, what it seems prevalent in this very long thread IMNHO is the
>> idea of the existence *of a single way to go* in programming.
> 
> Not at all.
> I have posted quite a few articles here where I admit that using
> strongly typed languages, imperfect as they are, may not be the
> optimal solution.

OK, I don't have a so good memory to recall this with the accuracy about 
your posts, but remember I was posting to the thread!

> OTOH, it strikes me that people still don't understand that unit tests
> are in no way a replacement for a type system.

No, in fact I would say that in theory it would be tempting to see them 
as orthogonal, but in practice all tests serve the purpose of catching 
errors, and — be they unit or integration tests — they also catch typing 
errors if such tests are put in the suite.

> 
>> People at c.l.lisp have chosen Lisp because of the perceived advantages,
>> so all these emotional phrases "reliefs you..." or "...wildest horror
>> dreams." don't add to the dialogue.
> 
> It may be that where you live this would count as emotional phrases. I
> ask you kindly to respect my cultural background, where such phrases
> are only there to make the discussion more interesting, natural
> language wise.

I accept this as a very interesting argument!
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186224974.551214.223050@g4g2000hsf.googlegroups.com>
On 3 Aug., 17:49, Cesar Rabak <·······@yahoo.com.br> wrote:
> Ingo Menger escreveu:
>
>
>
>
>
> > On 3 Aug., 15:34, Cesar Rabak <·······@yahoo.com.br> wrote:
> >> Ingo Menger escreveu:
>
> >>> On 3 Aug., 03:09, Cesar Rabak <·······@yahoo.com.br> wrote:
> >>>> Jon Harrop escreveu:
> >>>> [snipped]
> >>>>> So when you say "you're going to write a test suite" you are assuming that
> >>>>> the test suite would be the same for a dynamic or static program, which is
> >>>>> not correct.
> >>>> This looks like a non sense to me. Test should prove that business
> >>>> requirements are met or no.
> >>> But then, you don't catch type errors at all, as has been claimed here
> >>> earlier.
> >>> I can easily test that a function computes the annualized interest or
> >>> whatever given an amount and an interest rate. But there is no
> >>> guarantee, that this function will never be called with arguments of a
> >>> wrong type. As Jon Harrop correctly pointed out, the type system does
> >>> just that (among other things) and reliefs you from caring about the
> >>> cases you don't think about in your wildest horror dreams.
> >> I think you miss the whole point of unit tests here: _if_ in the
> >> particular technology you're using this is a real _business_ risk, you
> >> put one or two tests for this scenario and be sure it comes out green.
>
> > Sorry, but I have the impression that you don't understand.
> > How do you know if it is a *business* risk?
> > Or better, when is calling a function with wrong arguments *not a
> > business* risk? Perhaps when the application that does that has no
> > relevance whatsoever.
>
> It is not a business risk if the application is designed so that the
> wrong type will not happen by design.

To prove that such is the case is exactly the job of a strong type
system.
An empirical observation like "1000 tests run without errors" is not
nearly as strong as a formal proof. So when it come to "business risk"
I'd prefer a formal proof that there is none, whenever such a proof is
possible.

> Or we talking about an app that
> has to read random chunks of data from unknown sources all the time?

That's no problem. You write a parser that is proven to accept input
language X and only that and returns either a data structure
representing the input or a failure indication, i.e. in haskellish

type ParserInput = Either Error ParseData
parser :: Input -> ParserInput
dothework :: ParseData -> ...

Since you can only get at the ParseData via case discrimination,
you'll not be able to call dothework when the input was incorrect.


> > And how can you be sure, by unit testing one function, that it will
> > not get called with wrong arguments? For that, you had to test the
> > calling code!
>
> Of course you have to test all the calling code (or have it certified
> like code in libraries) and after that system testing, regression
> testing, integration tests and delivery testing. Or your claims include
> the exclusion of these tests as well?

Look, I don't advocate the exclusion of any tests. But the bad thing
about tests is that they are the analogon to empirical observations.
You can never (except for the most trivial cases) prove a theory
(here: my program is correct) by heaping up empirical evidence.
Ironically, tests can only prove the opposite (i.e., an observation
can disprove a theory).
Nevertheless, we still have to test, because we just don't have any
better method of establishing evidence that the program will do what
it ought to.


> No type system will be enough because during a non toy program you'll
> need to convert data from a format to another

And why, do you think, couldn't you do that in strongly typed
language?

> Making in large
> applications all this comfort about type checking less important on the
> whole.

No. On the contrary.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186237966.149306.228970@i13g2000prf.googlegroups.com>
> Look, I don't advocate the exclusion of any tests. But the bad thing
> about tests is that they are the analogon to empirical observations.

Yet, physics, biology, chemistry, and engineering manage to get by
just fine with empirical observation. Planes, bridges, and clones get
created without ever being rigorously type-checked...
From: Frank Buss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <12yxbh1hyy6ct$.1iv3crn0gb68c$.dlg@40tude.net>
Rayiner Hashem wrote:

> Yet, physics, biology, chemistry, and engineering manage to get by
> just fine with empirical observation. Planes, bridges, and clones get
> created without ever being rigorously type-checked...

The "type system", i.e. constraints of engineering are the physical laws.
Lisp is more like art, without much constraints. Both can be useful.

-- 
Frank Buss, ··@frank-buss.de
http://www.frank-buss.de, http://www.it4-systems.de
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186242155.740748.327160@e16g2000pri.googlegroups.com>
> The "type system", i.e. constraints of engineering are the physical laws.
> Lisp is more like art, without much constraints. Both can be useful.

The point I was trying to get at was that "empirical observation" is
not a dirty word. It is the basis of modern civilization. No physical
law can be proven to be correct, they are all "merely" validated by
empirical observation. And by and large, most engineers don't even
work with these unproven "laws". They work with highly simplified
models validated by repeated experimentation. Some of these models are
rigorously derived from unproven laws, and many are not. Even those
that are derived from physical laws are vastly simplified (eg: higher-
order terms are ignored, steady-state assumptions are made, etc). Many
models (eg: structural mechanics, aerodynamics) do not even attempt to
reflect the underlying physical interactions (electromagnetic
interactions between atoms), but rather take an approach of
considering idealized bulk materials. Designs created based on these
unproven models based on unproven laws are not even proven to conform
to those models! Rather, designs are validated by repeated
experimentation, first in simulation, then in the real world.

There is a lot that art and engineering have in common. Both have
rules, of a sort, or at least convention. Both have an intense
appreciation for empirical observation (this is beautiful because I
can see that it is beautiful; this works because I can see that it
works), and for experimentation.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xabt7qfbp.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
> The point I was trying to get at was that "empirical observation" is
> not a dirty word. 

Sure, empirical observation is better than nothing, but why settle for
observational evidence of something, if you can prove it as a theorem
without a lot more effort?

> No physical law can be proven to be correct, they are all "merely"
> validated by empirical observation.

Nonethless, a heck of a lot of physicists spend their nights and days
proving theorems, rather than observing.  I.e. they begin with some
axioms (maybe established through observation), but what they do for a
living is reason mathematically about the consequences of the axioms.
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f92lrp$148$1@aioe.org>
Paul Rubin escreveu:
> Rayiner Hashem <·······@gmail.com> writes:
>> The point I was trying to get at was that "empirical observation" is
>> not a dirty word. 
> 
> Sure, empirical observation is better than nothing, but why settle for
> observational evidence of something, if you can prove it as a theorem
> without a lot more effort?

Because theorems can only be proved correct within closed systems, which 
the Real World is not.

> 
>> No physical law can be proven to be correct, they are all "merely"
>> validated by empirical observation.
> 
> Nonethless, a heck of a lot of physicists spend their nights and days
> proving theorems, rather than observing.  I.e. they begin with some
> axioms (maybe established through observation), but what they do for a
> living is reason mathematically about the consequences of the axioms.

Physicists never "prove theorems"; there are no Theorems in Physics.

There are Hypotheses that through experimentation are converted into 
accepted "Laws of Nature", which keep constantly being challenged by new 
evidence and new discoveries.

In Engineering we go even more pragmatic and can work with enough 
experimental evidence even before a robust theory could have been 
developed, as commented elsethread.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186258665.595127.34790@j4g2000prf.googlegroups.com>
> Sure, empirical observation is better than nothing, but why settle for
> observational evidence of something, if you can prove it as a theorem
> without a lot more effort?

My point is that extensive testing has proven to work perfectly well
in physics and engineering. It can get you to an arbitrary degree of
confidence in the correctness of your software. You can never do
better than an arbitrary degree of confidence, even in the presence of
a static type system. You may prove that your code conforms to a type
model, but you have no guarantees at all about whether that type model
conforms correctly to your problem.

Ultimately, I don't think static type systems solve the problems that
need to be solved in programming. If extensive testing can get you
within an X% confidence level, where X% is big enough that it suffices
for all of physics and engineering, I don't think that it's a
bottleneck for the development of quality software. I think agile and
responsive development are greater bottlenecks, ones that Lisp
addresses directly. I want tools that let me quickly experiment with
algorithms, or easily fix logic errors in the field. I want something
that me say "yes, I can do that right now" when my boss throws in a
last minute requirement, not "sorry, that doesn't fit into the
original spec". I want something suitable for solving hard problems,
the sort of problem where if I had enough insight into the problem at
the outset to properly design a type model for the program, then I
wouldn't need to even write the program!

> Nonethless, a heck of a lot of physicists spend their nights and days
> proving theorems, rather than observing.  I.e. they begin with some
> axioms (maybe established through observation), but what they do for a
> living is reason mathematically about the consequences of the axioms.

Reasoning about the consequences of mathematical models is not the
same as proving theorems. The methods and intended goals are quite
different.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xr6mgyp9d.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
> My point is that extensive testing has proven to work perfectly well
> in physics and engineering. It can get you to an arbitrary degree of
> confidence in the correctness of your software. 

It is possible that the above is true in some sense, but if that's so,
then there should be a theorem expressing and proving how it is true.
Can you cite a theorem?

> Ultimately, I don't think static type systems solve the problems that
> need to be solved in programming. If extensive testing can get you
> within an X% confidence level, where X% is big enough that it suffices
> for all of physics and engineering,

I would like to see a theorem showing that testing can get within an
X% confidence level for arbitrary X that a program is free of type
errors, or even showing it for some reasonably small fixed (rather
than arbitrary) X.  Thanks for any URL's you can post.  If there is no
such theorem, then I don't believe the assertion as stated.  I don't
claim that no theorem can exist, but it would have to state its
conditions precisely, and I don't personally see how to do that.  It
might be an interesting research topic.
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186472160.561059.285720@22g2000hsm.googlegroups.com>
On 7 Aug., 05:53, Paul Rubin <·············@NOSPAM.invalid> wrote:
> Rayiner Hashem <·······@gmail.com> writes:
> > My point is that extensive testing has proven to work perfectly well
> > in physics and engineering. It can get you to an arbitrary degree of
> > confidence in the correctness of your software.
>
> It is possible that the above is true in some sense,

I'd rather call it a delusion.
As has been pointed out before, and is also an old wisdom in the
programming community, tests can only prove the presence of errors,
not their absence.
From the fact that N tests could not find an error, you can't conclude
anything about the correctness of the software. (Except in the case
that those N tests cover all possible combinations of input in the
wider sense.)
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hqmc3F3kr30oU1@mid.individual.net>
Ingo Menger wrote:
> On 7 Aug., 05:53, Paul Rubin <·············@NOSPAM.invalid> wrote:
>> Rayiner Hashem <·······@gmail.com> writes:
>>> My point is that extensive testing has proven to work perfectly well
>>> in physics and engineering. It can get you to an arbitrary degree of
>>> confidence in the correctness of your software.
>> It is possible that the above is true in some sense,
> 
> I'd rather call it a delusion.
> As has been pointed out before, and is also an old wisdom in the
> programming community, tests can only proove the presence of errors,
> not their absence.

That's complete and utter nonsense (or to put it mildly: an unfounded 
claim). A test case can prove the correctness for exactly one set of 
input data. A formal proof of correctness, on the other hand, is only as 
good as its initial assumptions. With proofs of correctness you are only 
shifting the problem to (a) whether you have made the right assumptions 
and (b) whether the proof itself is actually correct.

Static type systems and theorem provers only help with the latter. Tests 
help you to check whether your assumptions are correct for particular cases.

For the notion of correctness and its limitations, see 
http://doi.acm.org/10.1145/379486.379512


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xr6mf93uj.fsf@ruckus.brouhaha.com>
Pascal Costanza <··@p-cos.net> writes:
> For the notion of correctness and its limitations, see
> http://doi.acm.org/10.1145/379486.379512

This needs an account for access--do you know some other place the
paper is online?
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <8kmyx31g5b.fsf@hod.lan.m-e-leypold.de>
Paul Rubin wrote:

> Pascal Costanza <··@p-cos.net> writes:
>> For the notion of correctness and its limitations, see
>> http://doi.acm.org/10.1145/379486.379512
>
> This needs an account for access--do you know some other place the
> paper is online?

Scanned paper from 1985 -- probably no online source. You can try to
contact the author or his institution for a reprint (some have
retained the right) but that is probably expensive. You will have to
look in the library (the communications are available in printed form).

Regards -- Markus
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hrsn2F3liefbU1@mid.individual.net>
Paul Rubin wrote:
> Pascal Costanza <··@p-cos.net> writes:
>> For the notion of correctness and its limitations, see
>> http://doi.acm.org/10.1145/379486.379512
> 
> This needs an account for access--do you know some other place the
> paper is online?

No, unfortunately not.

Here is a short summary: There is no such thing as correctness. There is 
only relative consistency of two different (formal) descriptions of the 
same thing.

You can only prove consistency of a program with regard to some given 
specification. This means you have "only" shifted the question of the 
program's correctness to the question whether the specification is 
correct or not. Depending on the problem domain, you haven't achieved 
anything.

There are systematic problem domains (like basic algorithms, games with 
very strict and unambiguous rules, etc.) where it is straightforward to 
give good unambiguous specifications. For such problem domains, it is 
also feasible to provide correctness proofs (which most probably involve 
a static type system of some sort).

Many problem domains are not like that, though. One example in that 
paper is a missile defense system that uses an AI to determine whether a 
country is attacked in some way or not. You simply cannot give good and 
unambiguous specifications here. So correctness "proofs" are practically 
infeasible in such cases.

In other words, whether a piece of software does what the user expects 
it to do depends as much on the program itself as on the specification. 
In some problem domains, a user can only tell after the fact whether 
certain behavior or results are "correct" in a meaningful sense of the word.

Test cases are a better way to record user expectations here: You can 
basically present them input/output pairs, and they can decide whether 
these pairs are "correct" or not. (I am oversimplifying here.)

[I have given this summary off the top of my head, and haven't checked 
whether it's accurate. Especially, that paper doesn't talk about 
testing, if I remember correctly. But what I have written above is what 
I believe to be the gist of that paper.]


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186475864.988551.312430@19g2000hsx.googlegroups.com>
On 7 Aug., 09:47, Pascal Costanza <····@p-cos.net> wrote:
> Ingo Menger wrote:
> > On 7 Aug., 05:53, Paul Rubin <·············@NOSPAM.invalid> wrote:
> >> Rayiner Hashem <·······@gmail.com> writes:
> >>> My point is that extensive testing has proven to work perfectly well
> >>> in physics and engineering. It can get you to an arbitrary degree of
> >>> confidence in the correctness of your software.
> >> It is possible that the above is true in some sense,
>
> > I'd rather call it a delusion.
> > As has been pointed out before, and is also an old wisdom in the
> > programming community, tests can only proove the presence of errors,
> > not their absence.
>
> That's complete and utter nonsense (or to put it mildly: an unfounded
> claim).

No, it's the truth we all have to live with.
Pure testing alone cannot establish any confidence in correctness,
whatsoever.
What *does* establish confidence is reasoning. For example, for a
function with a natural number argument that recursively calls itself,
you run test cases for 0 and 1 to make sure that the function runs ok
for the base case and for the recursive case. Then, by inductive
reasoning, you conclude that it won't fail with any other valid input
whatsoever.

> A test case can prove the correctness for exactly one set of
> input data.

Sure. But this is nothing when the set of possible input data sets is
infinite (which is almost always the case for nontrivial programs).

> A formal proof of correctness, on the other hand, is only as
> good as its initial assumptions. With proofs of correctness you are only
> shifting the problem to (a) whether you have made the right assumptions
> and (b) whether the proof itself is actually correct.

Yes, but (b) is not really an issue with automatic proofs.
And for (a), assumptions have to be made in any case. It's just
whether those assumptions are made explicit or not.
Did it ever occur to you, that you had a problem with some program
code, and asked a friend about it, and in the same moment where you
had explained the problem, the solution came to your mind? This
happens often because one is forced to make one's assumptions explicit
in order to be understood by someone else.

Bottom line: Confidence in correctness cannot be established without
reasoning. Explicit reasoning written down in a formal notation could
at least be checked for correctness. "Unconcious reasoning" in the
head of the programmer is often fallacious.
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hrsunF3lpu5iU1@mid.individual.net>
Ingo Menger wrote:
> On 7 Aug., 09:47, Pascal Costanza <····@p-cos.net> wrote:
>> Ingo Menger wrote:
>>> On 7 Aug., 05:53, Paul Rubin <·············@NOSPAM.invalid> wrote:
>>>> Rayiner Hashem <·······@gmail.com> writes:
>>>>> My point is that extensive testing has proven to work perfectly well
>>>>> in physics and engineering. It can get you to an arbitrary degree of
>>>>> confidence in the correctness of your software.
>>>> It is possible that the above is true in some sense,
>>> I'd rather call it a delusion.
>>> As has been pointed out before, and is also an old wisdom in the
>>> programming community, tests can only proove the presence of errors,
>>> not their absence.
>> That's complete and utter nonsense (or to put it mildly: an unfounded
>> claim).
> 
> No, it's the truth we all have to live with.
> Pure testing alone cannot establish any confidence in correctness,
> whatsoever.

It can establish confidence in everything that is covered by the test 
suite. Not less, and very strictly speaking also not more. However, what 
a lot of people seem to experience is that a sufficiently complete test 
suite establishes enough confidence even in the cases that are 
not explicitly enumerated. There are, of course, no guarantees here.

> What *does* establish confidence is reasoning. For example, for a
> function with a natural number argument that recursively calls itself,
> you run test cases for 0 and 1 to make sure that the function runs ok
> for the base case and for the recursive case. Then, by inductive
> reasoning, you conclude that it won't fail with any other valid input
> whatsoever.

In most languages, this actually fails, because integer types typically 
wrap around after a certain threshold. There have been cases where the 
model used for reasoning didn't actually match the underlying language. 
The only way to ultimately find out whether your model matches the 
underlying language is by testing.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186557825.373239.145980@r34g2000hsd.googlegroups.com>
On 7 Aug., 20:45, Pascal Costanza <····@p-cos.net> wrote:
> Ingo Menger wrote:
> > On 7 Aug., 09:47, Pascal Costanza <····@p-cos.net> wrote:
> >> Ingo Menger wrote:
> >>> On 7 Aug., 05:53, Paul Rubin <·············@NOSPAM.invalid> wrote:
> >>>> Rayiner Hashem <·······@gmail.com> writes:
> >>>>> My point is that extensive testing has proven to work perfectly well
> >>>>> in physics and engineering. It can get you to an arbitrary degree of
> >>>>> confidence in the correctness of your software.
> >>>> It is possible that the above is true in some sense,
> >>> I'd rather call it a delusion.
> >>> As has been pointed out before, and is also an old wisdom in the
> >>> programming community, tests can only proove the presence of errors,
> >>> not their absence.
> >> That's complete and utter nonsense (or to put it mildly: an unfounded
> >> claim).
>
> > No, it's the truth we all have to live with.
> > Pure testing alone cannot establish any confidence in correctness,
> > whatsoever.
>
> It can establish confidence in everything that is covered by the test
> suite. Not less, and very strictly speaking also not more. However, what
> a lot of people seem to experience is that a sufficiently complete test
> suite provides establishes enough confidence even in the cases that are
> not explicitly enumerated. There are, of course, no guarantees here.
>
> > What *does* establish confidence is reasoning. For example, for a
> > function with a natural number argument that recursively calls itself,
> > you run test cases for 0 and 1 to make sure that the function runs ok
> > for the base case and for the recursive case. Then, by inductive
> > reasoning, you conclude that it won't fail with any other valid input
> > whatsoever.
>
> In most languages, this actually fails, because integer types typically
> wrap around after a certain threshold.

This complicates the reasoning just a bit, but it's still
indispensable.

> There have been cases where the
> model used for reasoning didn't actually match the underlying language.

One argument more to use as much automatic reasoning as one can get!
For, even if the automatic tools themselves couldn't be proven
correct formally, the empirical evidence that they are will be
collected with every usage.

> The only way to ultimately find out whether your model matches the
> underlying language is by testing.

You're making a fool out of yourself when you deny the fundamental
role of reasoning. Without reasoning, test results are just rows of
meaningless signs. This begins with the question, how the tests you
choose to perform are related to the specification? How do you test
THAT? You know, there have been cases where the models used for
testing didn't actually match the specification :)
For an example, consider this case.
Specification: "Implement a stable sort."
Tests:
Input     Output     Rating
[1,2,4]   [1,2,4]    Ok
[4,2,1]   [1,2,4]    Ok
...       [1,2,4]    Ok (for all other permutations)
[]        []         Ok (base case)
[42]      [42]       Ok (base case)

Mission accomplished!?
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5htcp3F3m9gp5U1@mid.individual.net>
Ingo Menger wrote:

>> There have been cases where the
>> model used for reasoning didn't actually match the underlying language.
> 
> One argument more to use as much automatic reasoning as one can get!

The more automatic reasoning you do, the more you have to convince 
yourself that the involved models actually match the underlying reality.

> For, even if the automatic tools themselves couldn't be prooven
> correct formally, the empirical evidence that they are will be
> collected with every usage.

...and you don't consider this testing?

>> The only way to ultimately find out whether your model matches the
>> underlying language is by testing.
> 
> You're making a fool out of yourself when you deny the fundamental
> role of reasoning.

What the heck makes you think that I am doing that?


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9gfgr$q1h$1@aioe.org>
Ingo Menger escreveu:
> On 7 Aug., 20:45, Pascal Costanza <····@p-cos.net> wrote:
>> Ingo Menger wrote:
>>> On 7 Aug., 09:47, Pascal Costanza <····@p-cos.net> wrote:
>>>> Ingo Menger wrote:
>>>>> On 7 Aug., 05:53, Paul Rubin <·············@NOSPAM.invalid> wrote:
>>>>>> Rayiner Hashem <·······@gmail.com> writes:
>>>>>>> My point is that extensive testing has proven to work perfectly well
>>>>>>> in physics and engineering. It can get you to an arbitrary degree of
>>>>>>> confidence in the correctness of your software.
>>>>>> It is possible that the above is true in some sense,
>>>>> I'd rather call it a delusion.
>>>>> As has been pointed out before, and is also an old wisdom in the
>>>>> programming community, tests can only proove the presence of errors,
>>>>> not their absence.
>>>> That's complete and utter nonsense (or to put it mildly: an unfounded
>>>> claim).
>>> No, it's the truth we all have to live with.
>>> Pure testing alone cannot establish any confidence in correctness,
>>> whatsoever.
>> It can establish confidence in everything that is covered by the test
>> suite. Not less, and very strictly speaking also not more. However, what
>> a lot of people seem to experience is that a sufficiently complete test
>> suite provides establishes enough confidence even in the cases that are
>> not explicitly enumerated. There are, of course, no guarantees here.
>>
>>> What *does* establish confidence is reasoning. For example, for a
>>> function with a natural number argument that recursively calls itself,
>>> you run test cases for 0 and 1 to make sure that the function runs ok
>>> for the base case and for the recursive case. Then, by inductive
>>> reasoning, you conclude that it won't fail with any other valid input
>>> whatsoever.
>> In most languages, this actually fails, because integer types typically
>> wrap around after a certain threshold.
> 
> This complicates the reasoning just a bit, but it's still
> indispensable.
> 
>> There have been cases where the
>> model used for reasoning didn't actually match the underlying language.
> 
> One argument more to use as much automatic reasoning as one can get!
> For, even if the automatic tools themselves couldn't be prooven
> correct formally, the empirical evidence that they are will be
> collected with every usage.
> 
>> The only way to ultimately find out whether your model matches the
>> underlying language is by testing.
> 
> You're making a fool out of yourself when you deny the fundamental
> role of reasoning. Without reasoning, test results are just rows of
> meaningless signs. This begins with the question, how the tests you
> choose to perform are related to the specification? How do you test
> THAT? You know, there have been cases where the models used for
> testing didn't actually match the specification :)

Since you're so professorial, I have an exercise for you:

At the start of the 20th century you're called to reason about this:

1) Hertzian waves travel in straight paths (Maxwell proved that);

2) It had been known and proven for 400 years by then that the Earth is spherical.

Now an experimenter wants to attempt to send a Hertzian signal from England 
to America. The cost of this endeavour is 500 thousand pounds sterling of 
that time[1].


Do you advise proceeding with the experiment or not?


--
Cesar Rabak

[1] I've seen someplace an analysis on this number that the amount of 
money at time was equivalent to a present space mission, but I cannot 
find the reference anymore. Anyway was a _lot_ of money!
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <8wr6mf1ghg.fsf@hod.lan.m-e-leypold.de>
Pascal Costanza wrote:

> Ingo Menger wrote:
>> On 7 Aug., 05:53, Paul Rubin <·············@NOSPAM.invalid> wrote:
>>> Rayiner Hashem <·······@gmail.com> writes:
>>>> My point is that extensive testing has proven to work perfectly well
>>>> in physics and engineering. It can get you to an arbitrary degree of
>>>> confidence in the correctness of your software.
>>> It is possible that the above is true in some sense,
>> I'd rather call it a delusion.
>> As has been pointed out before, and is also an old wisdom in the
>> programming community, tests can only proove the presence of errors,
>> not their absence.
>
> That's complete and utter nonsense (or to put it mildly: an unfounded
> claim). A test case can prove the correctness for exactly one set of
> input data. A formal proof of correctness, on the other hand, is only
> as good as its initial assumptions. With proofs of correctness you are
> only shifting the problem to (a) whether you have made the right
> assumptions and (b) whether the proof itself is actually correct.

It still might get you another (better) level of confidence (in the
statistical sense). That is very valuable in QA (and for insurance
companies that insure your liability).

> Static type systems and theorem provers only help with the
> latter. Tests help you to check whether your assumptions are correct
> for particular cases.


Can't understand how static typing creeps in here again: Ingo just
opposed the unlimited (and unfounded) trust Rayiner placed into tests.

Regards -- Markus
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186519449.055234.62290@i38g2000prf.googlegroups.com>
> As has been pointed out before, and is also an old wisdom in the
> programming community, tests can only proove the presence of errors,
> not their absence.

On the other hand, Knuth says "I've only proven this program, I
haven't tested it", suggesting that tests can find things that proofs
don't.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <dbvebr1gll.fsf@hod.lan.m-e-leypold.de>
Ingo Menger wrote:

> On 7 Aug., 05:53, Paul Rubin <·············@NOSPAM.invalid> wrote:
>> Rayiner Hashem <·······@gmail.com> writes:
>> > My point is that extensive testing has proven to work perfectly well
>> > in physics and engineering. It can get you to an arbitrary degree of
>> > confidence in the correctness of your software.
>>
>> It is possible that the above is true in some sense,
>
> I'd rather call it a delusion.
> As has been pointed out before, and is also an old wisdom in the
> programming community, tests can only proove the presence of errors,
> not their absence.
> From the fact that N tests could not find an error, you can't conclude
> anything about the correctness of the software. (Except in the case
> that those N tests cover all possible combinations of input in the
> wider sense.)

Not quite. It provides a confidence level which has to be modified by
empirically found factors for testing method and language and
development process in question. But as it turns out "It can get you to an
arbitrary degree" is wrong: There is a ceiling you can't go through
with testing. IBM has studied the properties of QA methods extensively
in the 60s to 80s and AFAIR that was part of their findings.

Regards -- Markus
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186508918.217807.279940@w3g2000hsg.googlegroups.com>
> It is possible that the above is true in some sense, but if that's so,
> then there should be a theorem expressing and proving how it is true.
> Can you cite a theorem?

This is just how experimental science works. I test something a
certain number of times under certain parameters, and that gives me an
X% level of confidence that my measurements actually reflect reality.
I couldn't care less whether there is a theorem proving the general
validity of the technique. If you have a problem with it, then I'd
think again about getting in an airplane or a car if I were you,
because that's precisely the standard to which such things are
designed.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xd4xypnbv.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
> > It is possible that the above is true in some sense, but if that's so,
> > then there should be a theorem expressing and proving how it is true.
> > Can you cite a theorem?
> 
> This is just how experimental science works. I test something a
> certain number of times under certain parameters, and that gives me an
> X% level of confidence that my measurements actually reflect reality.
> I couldn't care less whether there is a theorem proving the general
> validity of the technique. If you have a problem with it, then I'd
> think again about getting in an airplane or a car if I were you,
> because that's precisely the standard to which such things are
> designed.

No, sorry, I don't buy that, airplanes and cars are not software,
so your analogy fails for several reasons.

1. Building a plane or car relies on a bunch of experimentally
verified beliefs, e.g. we have some belief B1 some measurement about
the tensile strength of steel, so we use that to design the engines,
then some belief B2 about lift-drag ratios, that we use in the wings,
then B3 that we use to get oxygen into the cabin, etc., up to B_n for
some n.  We can't be 100% certain of any of those beliefs but maybe
we're 99.999% confident in them.  Airplanes and cars just don't have
that much structural complexity, so maybe n=10, which makes the
combined confidence level about 0.99999**10, which is about 0.9999,
which is still pretty good.

Software is a heck of a lot more complex in terms of its dependency
chain.  We may have more like n=100000 for a large program.  And
0.99999**100000 is closer to zero than 1.  

2. Basic engineering itself relies on bulk properties of materials,
which are statistical, e.g. a jet engine requires cold air coming in
to prevent its parts from melting, but "temperature" means the average
speed of a lot of moving molecules whose actual speeds follow a
statistical distribution.  It doesn't matter that some are fast and
some are slow, since there's no "Maxwell's demon" and statistics
guarantee that the aggregate will be well mixed and have average
speeds in a range we can predict and rely on.  

With software, we have to care about what every single molecule is
doing because any one of them can make the whole system collapse.  And
there really are Maxwell demons out there trying to get us.  For
example, we used to write code all the time that relied on O(1)
average probes for hash table insertions.  We relied on hash functions
to produce statistical distributions like the distributions of air
molecule speeds.  It turns out that this makes Internet programs fail
in the real world, not because of unfortunate coincidences or emergent
phenomena, but because they get sent input deliberately concocted to
create hash collisions and overflow internal tables or make the
program unusably slow.  From <http://www.cs.rice.edu/~scrosby/hash/>:

    For example, both binary trees and hash tables can degenerate to
    linked lists with carefully chosen input. We show how an attacker can
    effectively compute such input, and we demonstrate attacks against the
    hash table implementations in two versions of Perl, the Squid web
    proxy, and the Bro intrusion detection system. Using bandwidth less
    than a typical dialup modem, we can bring a dedicated Bro server to
    its knees; after six minutes of carefully chosen packets, our Bro
    server was dropping as much as 71% of its traffic and consuming all of
    its CPU.

The above is something hardly anyone thought about before a few years
ago but it's a significant issue in firewall implementation now.  No
amount of traditional testing would have ever exposed that problem.

3. Fundamentally, traditional engineering (cars and planes) has the
goal of dealing with the slings and arrows of random indifferent
nature, "Murphy's law".  Ross Anderson's expression for the equivalent
situation in programming is "programming Murphy's computer",
i.e. designing programs to withstand Murphy's law.  All the different
program failures from some sensor overflow because a temperature
got unexpectedly high, floating point loss of precision because of an
unforeseen singularity in an equation etc., are a matter of unlikely
events taking place.  Testing can possibly indeed put reasonable
bounds on the probability of such random, indifferent failures.  It's
different when the source of error is intelligent and malevolent.  To
use Ross Anderson's phrase again, we're not programming Murphy's
computer any longer--we're programming Satan's computer.  Testing your
program against the average case, or against the boundary cases you
can think of, is useless; you have to design for the worst case, which
can be extremely hard to identify.  

4. The complexity of data is also much higher than the complexity of
engineering materials, and its behavior can't be inferred from
observation.  You need reasoning and theorems.  For example (forgive
my use of Python but it should be self explanatory), consider the
following function.  Note: pow(x,y,z) computes (x**y) % z, for
positive ints x,y,z, by the usual repeated-squaring algorithm like in
SICP:

     def fermat(n, offset=10**500):
       """Base-3 Fermat probable-prime test for a = n + offset.

       Returns True when 3**a % a == 3 % a, which holds for every prime a
       (Fermat's little theorem) and also for the very sparse base-3
       Fermat pseudoprimes — exactly the property the surrounding text
       discusses.  Returns False otherwise.

       n must be a positive integer.  offset defaults to 10**500 to
       preserve the original contract (test n + 10**500); pass offset=0
       to test n itself.  Results are only meaningful for a >= 2.
       """
       assert isinstance(n, int) and n > 0
       a = n + offset
       # Bug fix: the original compared pow(3, a, a) against a itself,
       # but three-argument pow() returns a value in [0, a-1], so that
       # comparison could never succeed and the function always returned
       # False.  The correct Fermat check compares against 3 % a.
       return pow(3, a, a) == 3 % a

You can easily test this function against some other prime testing
function that you trust, throwing random inputs at it and seeing if
both get the same results.  But unless I messed something up, no
amount of random testing will find any errors (values of n for which
the function returns the wrong answer) and it's difficult to see what
"corner cases" to exercise.  In fact such values exist (they are
called pseudoprimes) but they are very sparse once the numbers get
that big (and we do use numbers that size in cryptography).  You can
construct them if you know what you're doing, so the above function
can cause a software failure if someone slips it a pseudoprime.

Conclusion is that observing what a program does with past inputs
doesn't predict what it will do with inputs that nobody has thought of.
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186569104.064847.148940@w3g2000hsg.googlegroups.com>
On 8 Aug., 08:11, Paul Rubin <·············@NOSPAM.invalid> wrote:
> Rayiner Hashem <·······@gmail.com> writes:
> > > It is possible that the above is true in some sense, but if that's so,
> > > then there should be a theorem expressing and proving how it is true.
> > > Can you cite a theorem?
>
> > This is just how experimental science works. I test something a
> > certain number of times under certain parameters, and that gives me an
> > X% level of confidence that my measurements actually reflect reality.
> > I couldn't care less whether there is a theorem proving the general
> > validity of the technique. If you have a problem with it, then I'd
> > think again about getting in an airplane or a car if I were you,
> > because that's precisely the standard to which such things are
> > designed.
>
> No, sorry, I don't buy that, airplanes and cars are not software,
> so your analogy fails for several reasons.
>
> 1. Building a plane or car relies on a bunch of experimentally
> verified beliefs, e.g. we have some belief B1 some measurement about
> the tensile strength of steel, so we use that to design the engines,
> then some belief B2 about lift-drag ratios, that we use in the wings,
> then B3 that we use to get oxygen into the cabin, etc., up to B_n for
> some n.  We can't be 100% certain of any of those beliefs but maybe
> we're 99.999% confident in them.  Airplanes and cars just don't have
> that much structural complexity, so maybe n=10, which makes the
> combined confidence level about 0.99999**10, which is about 0.9999,
> which is still pretty good.
>
> Software is a heck of a lot more complex in terms of its dependency
> chain.  We may have more like n=100000 for a large program.  And
> 0.99999**100000 is closer to zero than 1.  

I think you underestimate here the complexity of cars (and planes). I
have been told that the car I drive has a bus over which dozens of
microprocessors communicate, so if that's not complex enough ...
I'd refute Rayiner's argument rather by pointing out that
"experimental" science as he sees it just does not exist.
For example, in construction of bridges etc. lots of computing is done
in the field of physics that is known as "Statik" (in German). This is
nothing but trying to prove that the bridge can possibly hold
according to the well known mechanical laws. No bridge will ever be
built whose model did not pass this formal verification just "to see
what happens."
For cars and airplanes, it's the same. Years of CPU time will have
been expended on formal verification *before* the first prototype will
be built.
The engineering disciplines that deal with the real world have a
problem, though. Whatever they prove or disprove about their model
is not necessarily true about the real thing; it depends on how
accurate the model is (or can be made, think of climate models).
We in computer science have it better. We don't have to build a model
of our program first. We can check, verify and reason about the real
thing right away.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186587149.530794.165040@r34g2000hsd.googlegroups.com>
> For example, in construction of bridges etc. lots of computing is done
> in the field of physics that is know as "Statik" (in german).

It's "statics" in English, which I assume is the same thing.

> This is
> nothing but trying to prove that the bridge can possibly hold
> according to the well known mechanical laws.

These well-known mechanical laws are not laws in the sense of
mathematical reasoning. They are approximate rules that have been
proven to correlate well with empirical evidence. Most are derived
from idealized representations of actual systems, and are known not to
be concrete descriptions of the underlying reality. Almost all of the
ones used in practice are known to only be valid within the context of
a range of simplifying assumptions.

> No bridge will ever be
> build whose model did not pass this formal verification just "to see
> what happens."

"Formal verification" in engineering is not like "formal verification"
in computer science. It is really more like a thorough form of
testing.

1) The "laws" underlying the verification are not proven to be true,
they are validated by experiment. Any reasoning based on these laws
does not provide proof, merely a certain level of assurance, given
that the simplifying assumptions underlying the "laws" do indeed hold.

2) There is no proof that the assumptions underlying these laws
actually hold in the system under study. Experimental evidence is
provided to show that the assumptions hold under the conditions of
interest, but there is no notion of proving that the assumptions
hold.

3) Even if one takes the basic laws of statics to be axiomatically
true, closed-form mathematically-precise solutions are impossible
except for the simplest of systems (eg: a single column). Even in the
simplest of systems, using those closed-form solutions to evaluate the
structure under all possible loadings is difficult. This is why
engineering structures are always designed with a large safety factor
(ie: take the results of the theory, and multiply by X to get the
required strength).

In reality, simplified closed form solutions to engineering structures
are used to get an initial design point in the correct ball-park of
the final solution*. Most "verification" is accomplished through
extensive simulation, which is really just a fast and computerized
form of testing. This testing almost always involves the analysis of
the behavior of a system at discrete loadings, and thus doesn't
provide anything resembling "proof".

*) Closed-form solutions are also used to analyze particular special
cases that are known to be problematic. For example, in designing a
wing, you'd definitely want to compute the resonance modes and
frequencies of the structure. This sort of analysis will help guide
your design and your testing, but its not a final assurance. Only
testing can provide that.
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186593441.487385.4660@l70g2000hse.googlegroups.com>
On 8 Aug., 17:32, Rayiner Hashem <·······@gmail.com> wrote:
> > For example, in construction of bridges etc. lots of computing is done
> > in the field of physics that is know as "Statik" (in german).
>
> It's "statics" in English, which I assume is the same thing.
>
> > This is
> > nothing but trying to prove that the bridge can possibly hold
> > according to the well known mechanical laws.
>
> These well-known mechanical laws are not laws in the sense of
> mathematical reasoning. They are approximate rules that have been
> proven to correlate well with empirical evidence. Most are derived
> from idealized representations of actual systems, and are known not to
> be concrete descriptions of the underlying reality. Almost all of the
> ones used in practice are known to only be valid within the context of
> a range of simplifying assumptions.

This is true, no doubt. But don't overlook that those laws play the
role of axioms in the context described. We don't verify the
mechanical laws. We compute whether, under the assumptions that the
laws are true (or at least adequate for our purpose), the bridge will
not break.

>
> > No bridge will ever be
> > build whose model did not pass this formal verification just "to see
> > what happens."
>
> "Formal verification" in engineering is not like "formal verification"
> in computer science. It is really more like a thorough form of
> testing.

Of course, virtualized tests aka simulations will also be done.
And, in addition, what I call "formal verification" often goes the
other way round. For example, instead of building a bridge model and
verifying that, I'll just say "Well, I want to build a bridge of
length A, that spans a valley in height B and should be capable of
carrying C tons of weight and looks like the prototype P" and the
computer spits out how thick the columns will be at the base and what
sort of steel or concrete to use. (This is grossly simplified, of
course.)


> 1) The "laws" underlying the verification are not proven to be true,
> they are validated by experiment. Any reasoning based on these laws
> does not provide proof, merely a certain level of assurance, given
> that the simplifying assumptions underlying the "laws" do indeed hold.

Yes. That's the best we can get in empirical science. Though I think
that Newtons laws for example are quite accurate in describing
everyday mechanics, ironically just because we know that they are not
the last word on how the universe works.
Yet it is all the more surprising that this is done in the realm of
physics, while in our profession — which is math-based and so could rely
on eternal truths — automatic verification faces outright resistance.


> 2) ...
> 3) ...

This even underlines my last point.
I think the reason behind this is twofold:
1) computer science is a relatively young branch of math and not
everything is yet understood, much is to be discovered yet. This is
evidenced by the enormous research activity going on and also by new
knowledge that is gained literally every week.
2) (an economic reason): Material resources are scarce, so nobody
would take the risk of building a not yet 100% bulletproof (as far as it
can get bulletproof, as discussed above) bridge in the hope of
"upgrading" it later. Yet this does not hold for programs. The time and
resources spent to build a buggy program are not lost, given that the
overall design and architecture are adequate. It's only a marginal
effort to build a new version with one bug less — or so it seems.


> In reality, simplified closed form solutions to engineering structures
> are used to get an initial design point in the correct ball-park of
> the final solution*. Most "verification" is accomplished through
> extensive simulation, which is really just a fast and computerized
> form of testing.

Sure. You remember, I don't oppose testing.

> This testing almost always involves the analysis of
> the behavior of a system at discrete loadings, and thus doesn't
> provide anything resembling "proof".

You are right, but I am quite unsure where exactly we differ now.
Let me try to conclude:
- Collecting empirical data alone is useless. One has to have at least
a theory that tells, among other things, what claims to verify/falsify
etc.
- Reasoning, in one form or the other, is therefore indispensable. For
the special case of mathematical constructs (like programs) it can
(will) help us to prove some important properties.
- In the realm of hardware like cars, bridges etc., theory and formal
methods help us to come near a possible solution, thereby helping to
reduce costs that would arise by building, testing, building again and
so on. In the software realm, the cost of rebuilding is often marginal
(unless the product was sold already, that is).
- Nevertheless, testing remains most important in both realms.

Ok?
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186617004.901334.111250@k79g2000hse.googlegroups.com>
> This is true, no doubt. But don't overlook that those laws play the
> role of axioms in the context described. We don't verify the
> mechanical laws. We compute whether, under the assumptions that the
> laws are true (or at least adequate for our purpose), the bridge will
> not break.

What I think you're missing is that engineers use natural laws
fundamentally differently from how mathematicians use axioms and
reasoning. Let me use a real-world example. Where I work, we have two
groups of people, those that do simulation and modeling, and those
that do testing. The simulation/modeling people help get the design in
the ballpark, but analytic solutions of theoretical behavior carry
almost no weight with the customer. They might tell you enough to be
reasonably sure that a design isn't completely wrong, but it doesn't
tell you that the design is right. What we submit in our reports to
the customer, the ones demonstrating that our design really works, is
the results of our extensive field testing.

In engineering, there is also an iterative refinement loop between theory
and experiment that just doesn't make sense if you think about
physical laws as axioms. For example, there are lots of empirical
corrections to aerodynamic "laws" (really, models) that are used to
account for various effects observed in real-world testing.

> Of course, virtualized tests aka simulations will also be done.
> And, in addition, what I call "formal verification" often goes the
> other way round. For example, instead of building a bridge model and
> verifying that, I'll just say "Well, I want to build a bridge of
> length A, that spans a valley in heigth B and should be capable of
> carrying C tons of weight and looks like the prototype P" and the
> computer spits out how thick the columns will be at the base and what
> sort of steel or concrete to use. (This is grossly simplified, of
> course.)

Yes, there are tools like this in engineering (eg: Hysys in chemical
engineering), but they are mainly expediencies to help you get a
useful starting point for the design. The reality is that the
underlying physics is way too complicated for any tool to be able to
make reasonable guarantees. There is a lot of ad-hoc analysis that
takes place after that to study particular specialized behaviors, but
even after all those, all you have is a design that probably won't
explode, not one that is necessarily correct.

> - Collecting empirical data alone is useless. One has to have at least
> a theory that tells, among other things, what claims to verify/falsify
> etc.

I disagree. Empirical evidence gave us the first airplane (and the
second, and the third, etc). It wasn't until much later that we had
somewhat proper theories to describe the behaviors. (On the other
hand, I believe that the theory of electronics preceded the practice).
Where theories really come in useful is in speeding up the convergence
of iterative design. It took a long time for the Wright brothers to
collect all the relevant empirical evidence. Today, straightforward
analytic techniques will get you at least in the ballpark for most
conventional airplane designs.

> - Resoning, in one form or the other is therefore indispensable. For
> the special case of mathematical constructs (like programs) it can
> (will) help us to proove some important properties.

I agree with this part, definitely. It's important to reason about
your programs can and cannot do. I just don't think dynamic typing is
a deal-breaker in this regard, or that tools should necessarily
enforce particular properties even when the programmer doesn't want
to.

> - In the realm of hardware like cars, bridges etc., theory and formal
> methods help us to come near a possible solution, thereby helping to
> reduce costs that would arise by buildimg, testing, building again and
> so on. In the software realm, the cost of rebuilding is often marginal
> (unless the product was sold already, that is).

Is the low incremental cost of rebuilding software an argument for or
against the use of formal methods?
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186642921.141472.74770@22g2000hsm.googlegroups.com>
On 9 Aug., 01:50, Rayiner Hashem <·······@gmail.com> wrote:
> Ingo Menger wrote:
> > - In the realm of hardware like cars, bridges etc., theory and formal
> > methods help us to come near a possible solution, thereby helping to
> > reduce costs that would arise by buildimg, testing, building again and
> > so on. In the software realm, the cost of rebuilding is often marginal
> > (unless the product was sold already, that is).
>
> Is the low incremental cost of rebuilding software an argument for or
> against the use of formal methods?

I think, it's neither.
It may be an explanation for the way software development works, on
economic grounds.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <pzzm131gq8.fsf@hod.lan.m-e-leypold.de>
Paul Rubin wrote:

> Rayiner Hashem <·······@gmail.com> writes:
>> My point is that extensive testing has proven to work perfectly well
>> in physics and engineering. It can get you to an arbitrary degree of
>> confidence in the correctness of your software. 
>
> It is possible that the above is true in some sense, but if that's so,
> then there should be a theorem expressing and proving how it is true.
> Can you cite a theorem?
>
>> Ultimately, I don't think static type systems solve the problems that
>> need to be solved in programming. If extensive testing can get you
>> within an X% confidence level, where X% is big enough that it suffices
>> for all of physics and engineering,
>
> I would like to see a theorem showing that testing can get within an
> X% confidence level for arbitrary X that a program is free of type
> errors, or even showing it for some reasonably small fixed (rather
> than arbitrary) X.  Thanks for any URL's you can post.  If there is no
> such theorem, then I don't believe the assertion as stated.  I don't
> claim that no theorem can exist, but it would have to state its
> conditions precisely, and I don't personally see how to do that.  It
> might be an interesting research topic.

IBM had some studies on how different methods of QA work together. I
don't want to look for the URLs now (find the German script of
Prof. Klaeren of Tübingen and try to pick the source from the
bibliography: Probably no URL but dead tree articles).

The result was, to get the confidence level usually needed for high
integrity software you had to combine methods, and one only succeeded
when included formal review in the combination. So the message is:
Testing alone doesn't cut it; formal review is the method of choice.

(Of course I still maintain that static typing makes review easier,
because types provide partial documentation of interfaces).

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bk65fsev37i39@corp.supernews.com>
Rayiner Hashem wrote:
>> Sure, empirical observation is better than nothing, but why settle for
>> observational evidence of something, if you can prove it as a theorem
>> without a lot more effort?
> 
> My point is that extensive testing has proven to work perfectly well
> in physics and engineering.

Why do you think physics is full of mathematical derivations and proofs?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186383401.932615.168150@w3g2000hsg.googlegroups.com>
On 4 Aug., 17:42, Rayiner Hashem <·······@gmail.com> wrote:
> > The "type system", i.e. constraints of engineering are the physical laws.
> > Lisp is more like art, without much constraints. Both can be useful.
>
> The point I was trying to get at was that "empirical observation" is
> not a dirty word.

Yes, this is right (and I nowhere suggested it was). It's just that
one should have made clear to oneself that empirical observations/
tests can only falsify theories, never verify one.
Whereas a correct formal proof gives us absolute confidence in the
truth of a premise. Of course, this is only possible in formal
systems.
Thus, all claims that "unit tests" or whatever could do the same as a
type system does are mistaken in a fundamental way.

> It is the basis of modern civilization.

That doesn't change the sad truth that tests can only ever prove that
a program is erroneous and never that it's correct.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186423270.349922.138380@q75g2000hsh.googlegroups.com>
> That doesn't change the sad truth that tests can only ever proove that
> a program is erronous and never that it's correct.

Another thing that is a fact is that no existing compiler will check
to make sure the type model constructed in your program accurately
conforms to the reality of the problem domain.

In light of that fact, I don't find your fact particularly sad at all.
On one hand, I can have an A% probability that my program is incorrect
despite passing the unit tests, and on the other I can have a B%
probability that my program is incorrect despite passing the type
checker (by virtue of the type model being incorrect). Neither A nor B
= 0. No empirical evidence has been presented that would pin down the
relative values of A or B, just some wild speculation. So what is
there to be sad about?
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xfy2v1tpr.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
> On one hand, I can have an A% probability that my program is incorrect
> despite passing the unit tests, and on the other I can have a B%
> probability that my program is incorrect despite passing the type
> checker (by virtue of the type model being incorrect). Neither A nor B
> = 0. No empirical evidence has been presented that would pin down the
> relative values of A or B, just some wild speculation. So what is
> there to be sad about?

Praxis has claimed measured defect rates of under 0.1 per 1000 LOC
from the SPARK-Ada system.  Testing based approaches are at least an
order of magnitude worse.  On the other hand, it's not clear how
Praxis actually finds those defects, or knows that they didn't miss
any.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <gir6mf2wq9.fsf@hod.lan.m-e-leypold.de>
Paul Rubin wrote:

> Rayiner Hashem <·······@gmail.com> writes:
>> On one hand, I can have an A% probability that my program is incorrect
>> despite passing the unit tests, and on the other I can have a B%
>> probability that my program is incorrect despite passing the type
>> checker (by virtue of the type model being incorrect). Neither A nor B
>> = 0. No empirical evidence has been presented that would pin down the
>> relative values of A or B, just some wild speculation. So what is
>> there to be sad about?
>
> Praxis has claimed measured defect rates of under 0.1 per 1000 LOC
> from the SPARK-Ada system.  Testing based approaches are at least an
> order of magnitude worse.  On the other hand, it's not clear how
> Praxis actually finds those defects, or knows that they didn't miss
> any.

Statistics: From the rate of error discovery in the field you know how
many errors are still there (modified by some
factors). This is much like entering a beach and finding a nice shell
immediately. This might be accident, but assuming this place at the
beach is typical, the fact that you found the shell immediately would
indicate that it is fairly common here. So you walk some twenty meters
and find a second shell of the same kind: Now the probability that the
shell is fairly common goes up. After walking some 100 meters and
finding some 10-20 shells you can pretty well guess how many shells
you'll find when walking the complete beach.

Modify that by a factor for the probability that you overlook shells
(which can be measured in a similar vein) and you have an estimate
of how many shells there actually are at this part of the beach.

Regards -- Markus
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xvebqo6se.fsf@ruckus.brouhaha.com>
·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) writes:
> > Praxis has claimed measured defect rates of under 0.1 per 1000 LOC
> > from the SPARK-Ada system.  Testing based approaches are at least an
> > order of magnitude worse.  On the other hand, it's not clear how
> > Praxis actually finds those defects, or knows that they didn't miss
> > any.
> 
> Statistics: From the rate of error discovery in the field you know how
> much errors there are still there already (modified by some
> factors). This much like you entering a beach and finding a nice shell
> immediately. This might be accident, but assuming this place at the
> beach is typical, the fact that you found the shell immediately would
> indicate that it is fairly common here.

Right.  But suppose you see no shells, and you look everywhere in the
beach that you think is typical, and you see no shells (or very few).
How do you know the shells aren't in NON-typical places where you
didn't think to look?

> Modify that by factor what the probability is that you overlook shells
> (which can be measured in a similar vain) and you have an estimation
> on how many shells there are actually at this part of the beach.

I don't see how to measure the probability that I overlook shells
though!  In a large beach I can never know when I have found the last
shell.  Maybe they get harder and harder to find, so that after the
5th or 6th one it's almost impossible, but there are still hundreds of
them out there and I can only find the easiest ones.

The other thing I don't understand about that Praxis stuff is that
they don't seem to prototype.  They spend half their project time
writing specifications in Z, then maybe 20% (?) writing the Spark-Ada
code based on the spec, then maybe 30% testing.  I wonder how they
know what to specify.  I also wonder how many bugs they find in
testing--I've never seen them publish that number.  I don't know what
kind of tools they write the Z specs with.  I looked at some web pages
about Z and it looks grotesque, so I didn't try figuring it out much.

There is a guy I work with who has worked on aerospace projects in
Ada.  Maybe I'll ask him about this stuff.  But I don't think they
used Spark, just Ada 95 or whatever it was.
From: Andrew Reilly
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <pan.2007.08.06.08.53.44.68781@areilly.bpc-users.org>
On Sun, 05 Aug 2007 23:56:41 -0700, Ingo Menger wrote:

> On 4 Aug., 17:42, Rayiner Hashem <·······@gmail.com> wrote:
>> > The "type system", i.e. constraints of engineering are the physical laws.
>> > Lisp is more like art, without much constraints. Both can be useful.
>>
>> The point I was trying to get at was that "empirical observation" is
>> not a dirty word.
> 
> Yes, this is right (and I nowhere suggested it was). It's just that
> one should have made clear to oneself that empirical observations/
> tests can only falsify theories, never verify one.
> Whereas a correct formal proof gives us absolute confidence in the
> truth of a premise. Of course, this is only possible in formal
> systems.
> Thus, all claims that "unit tests" or whatever could do the same as a
> type system does are mistaken in a fundamental way.

No-one is making that claim.  The claim that is mostly being made is that
the relationship between formal systems capable of supporting proofs and
informal systems capable of being tested is much the same as that between
mathematics and the observable universe.  Close and useful/beautiful, but
not a guarantee of much that is real.

>> It is the basis of modern civilization.
> 
> That doesn't change the sad truth that tests can only ever proove that a
> program is erronous and never that it's correct.

And a proof only holds within the set of axioms that defines it.  The real
world can be curlier than that.  Sometimes being unable to disprove a
theory is as good as you can reasonably get.

Cheers,

-- 
Andrew
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <32ejigk813.fsf@hod.lan.m-e-leypold.de>
Andrew Reilly wrote:

> On Sun, 05 Aug 2007 23:56:41 -0700, Ingo Menger wrote:
>
>> On 4 Aug., 17:42, Rayiner Hashem <·······@gmail.com> wrote:
>>> > The "type system", i.e. constraints of engineering are the physical laws.
>>> > Lisp is more like art, without much constraints. Both can be useful.
>>>
>>> The point I was trying to get at was that "empirical observation" is
>>> not a dirty word.
>> 
>> Yes, this is right (and I nowhere suggested it was). It's just that
>> one should have made clear to oneself that empirical observations/
>> tests can only falsify theories, never verify one.
>> Whereas a correct formal proof gives us absolute confidence in the
>> truth of a premise. Of course, this is only possible in formal
>> systems.
>> Thus, all claims that "unit tests" or whatever could do the same as a
>> type system does are mistaken in a fundamental way.
>
> No-one is making that claim.  The claim that is mostly being made is that

Not strictly true. The claim "I don't need a static type system since
I catch all those errors that static typing catches with unit tests
instead" has been put repeatedly. Might have been Rayiner, might have
been Cesar or whoever: Kindly use the archive to check.

> the relationship between formal systems capable of supporting proofs and
> informal systems capable of being tested is much the same as that between
> mathematics and the observable universe.  Close and useful/beautiful, but
> not a guarantee of much that is real.

Personally I prefer to fly in air crafts whose software has been
verified, even if the verification process is deemed unreal by some.

> And a proof only holds within the set of axioms that defines it.  The real
> world can be curlier than that.  Sometimes being unable to disprove a
> theory is as good as you can reasonably get.

That is the kind of philosophy that really gets my hair
up. Fortunately they don't let people with such convictions near
aerospace development or nuclear plants or even train control (or
those people are required to leave their convictions at the door).

Regards -- Markus
From: Andrew Reilly
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <pan.2007.08.07.23.35.15.647414@areilly.bpc-users.org>
On Mon, 06 Aug 2007 17:16:40 +0200, Markus E.L. 2 wrote:
> Personally I prefer to fly in air crafts whose software has been
> verified, even if the verification process is deemed unreal by some.

I prefer to fly in aircraft that have been thoroughly tested, by real test
pilots (and before that, by various stress measurements and bench-runs of
engines and what-not), even though they contain many, many subsystems that
have not been "proven" or "verified".  Luckily, they all are.

>> And a proof only holds within the set of axioms that defines it.  The real
>> world can be curlier than that.  Sometimes being unable to disprove a
>> theory is as good as you can reasonably get.
> 
> That is the kind of philosophy that really gets my hair
> up.

Tough.  Life is hard and complicated.

> Fortunately they don't let people with that convictions at
> airospace development or nuclear plants or even train control (or
> those people are required to leave their convictions at the door).

Fortunately aeroplane development and nuclear plant construction are
engineering disciplines well rooted in the realm of checks and testing. 
They use everything at their disposal.  Sure, that means that anything
that can be proven is proven, but no-one involved in those enterprises has
any illusions that that guarantees the success of the whole project in the
real world.  That requires designing with margins of error (why would you
need those, in a "proven system"?) and thorough testing before final
deployment.

-- 
Andrew
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9dnrp$6l2$1@aioe.org>
Markus E.L. 2 escreveu:
> 
[snipped]

> Personally I prefer to fly in air crafts whose software has been
> verified, even if the verification process is deemed unreal by some.
> 

And if it fails, you'll be content that the error surely was not due to
wrong types, right!

The spectacular blowup of Ariane or the errors of the Raptor fighter 
make me think that type errors are just a very small part of the issues 
we have with verification of programs.

>> And a proof only holds within the set of axioms that defines it.  The real
>> world can be curlier than that.  Sometimes being unable to disprove a
>> theory is as good as you can reasonably get.
> 
> That is the kind of philosophy that really gets my hair
> up. Fortunately they don't let people with that convictions at
> airospace development or nuclear plants or even train control (or
> those people are required to leave their convictions at the door).

Now it is my time to question you about your obvious lack of knowledge 
in these fields right, Markus!?
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <w8r6mdf41t.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Markus E.L. 2 escreveu:
>>
> [snipped]
>
>> Personally I prefer to fly in air crafts whose software has been
>> verified, even if the verification process is deemed unreal by some.
>>
>
> And it fails you'll be contempt that the error surely was not due
> wrong types, right!

What an utter piece of nonsense. The utility and actual use of program
verification has been drawn into doubt. This was my answer and still
is. *PLONK*. <expletive deleted/>

> The expectacular blow of Arianne or the errors of the Raptor fighter
> make me think that: type errors are just a very small part of the
> issues we have to verification of programs.
>
>>> And a proof only holds within the set of axioms that defines it.  The real
>>> world can be curlier than that.  Sometimes being unable to disprove a
>>> theory is as good as you can reasonably get.
>> That is the kind of philosophy that really gets my hair
>> up. Fortunately they don't let people with that convictions at
>> airospace development or nuclear plants or even train control (or
>> those people are required to leave their convictions at the door).
>
> Now it is my time to question you about your obvious lack of knowledge
> in these fields right, Markus!?

That's OK with me. You obviously missed (and snipped) the OPs
implication that we can ditch verification because its utility depends
on it being set up in the right context (right axioms, right
model). Not that I care if you miss anything.

- M
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9gkf3$740$1@aioe.org>
Markus E.L. 2 escreveu:
> Cesar Rabak wrote:
> 
>> Markus E.L. 2 escreveu:
>> [snipped]
>>
>>> Personally I prefer to fly in air crafts whose software has been
>>> verified, even if the verification process is deemed unreal by some.
>>>
>> And it fails you'll be contempt that the error surely was not due
>> wrong types, right!
> 
> What an utter piece of nonsense. The utility and actual use of program
> verification has been drawn into doubt. This was my answer and still
> is. *PLONK*. <expletive deleted/>

No Sir! You have too high expectations of _your_ affirmations! What I 
have put (and I stand by the assertion) is that your personal preference 
is not any indication of authority...

If your ego is so inflated for not perceiving that, I recommend a 
psychiatrist for specific therapy. You need urgent!

> 
>> The expectacular blow of Arianne or the errors of the Raptor fighter
>> make me think that: type errors are just a very small part of the
>> issues we have to verification of programs.
>>
>>>> And a proof only holds within the set of axioms that defines it.  The real
>>>> world can be curlier than that.  Sometimes being unable to disprove a
>>>> theory is as good as you can reasonably get.
>>> That is the kind of philosophy that really gets my hair
>>> up. Fortunately they don't let people with that convictions at
>>> airospace development or nuclear plants or even train control (or
>>> those people are required to leave their convictions at the door).
>> Now it is my time to question you about your obvious lack of knowledge
>> in these fields right, Markus!?
> 
> That's OK with me. You obviously missed (and snipped) the OPs
> implication that we can ditch verification because its utility depends
> on it being set up in the right context (right axioms, right
> model). Not that I care if you miss anything.
> 

No, what you missed is my point of your arrogance you've shown in this 
thread. . . comb your hair and learn a little more before claiming we 
already a final Equation of the Universe.

For an example on the failure on "axioms" and "pure reasoning" in 
engineering find my another post about an interesting experiment done in 
early XX century.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <c7y7gjzgs5.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Markus E.L. 2 escreveu:
>> Cesar Rabak wrote:
>>
>>> Markus E.L. 2 escreveu:
>>> [snipped]
>>>
>>>> Personally I prefer to fly in air crafts whose software has been
>>>> verified, even if the verification process is deemed unreal by some.
>>>>
>>> And it fails you'll be contempt that the error surely was not due
>>> wrong types, right!
>> What an utter piece of nonsense. The utility and actual use of
>> program
>> verification has been drawn into doubt. This was my answer and still
>> is. *PLONK*. <expletive deleted/>
>
> No Sir! You've too high expectations towards _your_ affirmations! What
> I have put (and stand the assertion) is that your personal preference
> is any indication of authority...

My ego is so inflated, it even can't parse this sentence. My
impression is, that a key part of the sentence is missing.

> If your ego is so inflated for not perceiving that, I recommend a
> psychiatrist for specific therapy. You need urgent!

<...>


> No, what you missed is my point of your arrogance you've shown in this
> thread. . . comb your hair and learn a little more before claiming we
> already a final Equation of the Universe.
>
> For an example on the failure on "axioms" and "pure reasoning" in
> engineering find my another post about an interesting experiment done
> in early XX century.

I'm certain that it is only my arrogance that makes me imagine that
verification is in active use for embedded control systems. What I find
especially endearing is the notion (which you also seem to be adhering to)
that we can dispense with verification and proof within verification
because there are other sources of errors. Never mind that the trend of
view in professional circles has been to the contrary in at least the
last 40 years. Perhaps you're the Galileo of programming?

Still, until you've your great coming out, I'm still grateful that
train and air traffic control (as well as medical software) is
developed by organizations which understand about quantifying risk and
_have_ _actually_ decided to verify their software.

BTW: Perhaps you could ask your psychiatrist about how the Ariane 5
accident really came about. Your ideas about that are utterly too
simple, I noticed.

-- Markus
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9kthf$rf4$1@aioe.org>
Markus E.L. 2 escreveu:
> Cesar Rabak wrote:
> 
>> Markus E.L. 2 escreveu:
>>> Cesar Rabak wrote:
>>>
>>>> Markus E.L. 2 escreveu:
>>>> [snipped]
>>>>
>>>>> Personally I prefer to fly in air crafts whose software has been
>>>>> verified, even if the verification process is deemed unreal by some.
>>>>>
>>>> And it fails you'll be contempt that the error surely was not due
>>>> wrong types, right!
>>> What an utter piece of nonsense. The utility and actual use of
>>> program
>>> verification has been drawn into doubt. This was my answer and still
>>> is. *PLONK*. <expletive deleted/>
>> No Sir! You've too high expectations towards _your_ affirmations! What
>> I have put (and stand the assertion) is that your personal preference
>> is any indication of authority...
> 
> My ego is so inflated, it even can't parse this sentence. My
> impression is, that a key part of the sentence is missing.
> 
>> If your ego is so inflated for not perceiving that, I recommend a
>> psychiatrist for specific therapy. You need urgent!
> 
> <...>
> 
> 
>> No, what you missed is my point of your arrogance you've shown in this
>> thread. . . comb your hair and learn a little more before claiming we
>> already a final Equation of the Universe.
>>
>> For an example on the failure on "axioms" and "pure reasoning" in
>> engineering find my another post about an interesting experiment done
>> in early XX century.
> 
> I'm certain that it is only my arrogance that makes me imagine that
> verfication is in active use for embedded control systems. What I find
> especially endearing is your the notion (you also seem adgering to)
> that we can dispense with verification and proof within verification
> because there are other source of errors. Never mind that the trend of
> view in professional circles has been to the contrary in at least the
> last 40 years. Perhaps you're the Galileo of programmming?
> 

NO here you're showing another trait of yours: technical ignorance and 
recurringly making fuzzy citations.

Validation for safety systems (including embedded) is type system 
agnostic (in fact is language agnostic).

Notice that if you like your funny analogies. May you be the Keppler of 
programming?

> Still, until you've your great coming out, I'm still grateful that
> train and air traffic control (as well as medical software) is
> developed by organizations which understand about quantifying risk and
> _have_ _actually_ decided to verify their software.

Yes, which includes thorough walkthroughs and testing.

> 
> BTW: Perhaps you could ask your psychiatrist about how the Ariane 5
> accident really came about. Your ideas about that are utterly to
> simple, I noticed.

No for this engineering would have sufficed: it lacked testing.

Talking about your needs for psychiatric assistance, don't forget to 
tell about your dithery as well: or the plonking above was a sissi threat?
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186863611.096808.30970@o61g2000hsh.googlegroups.com>
> > Still, until you've your great coming out, I'm still grateful that
> > train and air traffic control (as well as medical software) is
> > developed by organizations which understand about quantifying risk and
> > _have_ _actually_ decided to verify their software.
>
> Yes, which includes thorough walktroughs and testing.

Verification and testing are just the tip of the iceberg. Let me give
an example. One of the major guiding documents for the development of
safety-critical avionics software is DO-178B. There is a fairly good
description of it here: http://www.stsc.hill.af.mil/crosstalk/1998/10/schad.asp

Quoting from the paper, the basic issues are:

"How was it known that the testing was comprehensive and complete?
How was it known that the system requirements were comprehensive and
complete?
How was it known that the software requirements were comprehensive and
complete and interpreted the system requirement accurately?
How do we provide proof that a design or implementation error, which
may be present, cannot produce a safety critical situation?"

DO-178B outlines a detailed process to address these issues. The first
issue is addressed by ensuring 100% test coverage, and traceability
from high-level requirements to tests and back. The second and third
issues are addressed by requiring detailed evidence of the
verification of requirements (eg: fault-tree analysis of potential
situations), and at the highest safety levels, independent
verification of these requirements. The fourth issue is addressed by
requiring physical error situations to be considered (even ones that
verification "proved" cannot occur), and planned-for.

Note that static typing does not directly address any of these issues.
Indeed, the second two issues are precisely the ones I was harping
about earlier: automatic verification may guarantee that the
implementation fits a particular type model, but provides _no_
guarantees that the type model is accurate. Static typing does help
address the issue of ensuring that the implementation matches the
software requirements, but the lack of expressiveness of existing type
systems suggests that the requirement for detailed (and often
independent) human code review is not going away any time soon.

There is another crucial paragraph which I think deserves mention:

"Evidence must be formally developed for systematic implementation,
documentation, and test or analysis that each requirement has been
incorporated and verified. In turn, the evidence must show that all
levels of requirements can be traced to all of its roots and each is
fully tested. It also must demonstrate the characteristic of
reversibility. This means one must be able to trace from each test to
its top level requirements. The total results in 100% requirement
coverage."

In other words, verification is just a piece of the issue.
Documentation must show that what was verified traces back properly to
the requirements, and complete unit and integration testing must back
up the results of the analytic verification. Even in the presence of
extensive analytic verification, testing is still the last line of
defense!
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <mn3aypu3u0.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Markus E.L. 2 escreveu:
>> Cesar Rabak wrote:
>>
>>> Markus E.L. 2 escreveu:
>>>> Cesar Rabak wrote:
>>>>
>>>>> Markus E.L. 2 escreveu:
>>>>> [snipped]
>>>>>
>>>>>> Personally I prefer to fly in air crafts whose software has been
>>>>>> verified, even if the verification process is deemed unreal by some.
>>>>>>
>>>>> And it fails you'll be contempt that the error surely was not due
>>>>> wrong types, right!
>>>> What an utter piece of nonsense. The utility and actual use of
>>>> program
>>>> verification has been drawn into doubt. This was my answer and still
>>>> is. *PLONK*. <expletive deleted/>
>>> No Sir! You've too high expectations towards _your_ affirmations! What
>>> I have put (and stand the assertion) is that your personal preference
>>> is any indication of authority...
>> My ego is so inflated, it even can't parse this sentence. My
>> impression is, that a key part of the sentence is missing.
>>
>>> If your ego is so inflated for not perceiving that, I recommend a
>>> psychiatrist for specific therapy. You need urgent!
>> <...>
>>
>>> No, what you missed is my point of your arrogance you've shown in this
>>> thread. . . comb your hair and learn a little more before claiming we
>>> already a final Equation of the Universe.
>>>
>>> For an example on the failure on "axioms" and "pure reasoning" in
>>> engineering find my another post about an interesting experiment done
>>> in early XX century.
>> I'm certain that it is only my arrogance that makes me imagine that
>> verfication is in active use for embedded control systems. What I find
>> especially endearing is your the notion (you also seem adgering to)
>> that we can dispense with verification and proof within verification
>> because there are other source of errors. Never mind that the trend of
>> view in professional circles has been to the contrary in at least the
>> last 40 years. Perhaps you're the Galileo of programmming?
>>
>
> NO 

Fine. So we can dispense with your more sweeping over-generalization
without the danger of being wrong in the future face of history.

> here you're showing another trait of yours: technical ignorance and
> recurringly making fuzzy citations.

Like I even made a citation. Looking back over the irrational dialog
with you (that is: irrational at your side) I can't avoid the
impression that you're actually crave to be called a troll -- or your
language is indicative of a slight Tourette syndrome.

> Validation for safety systems (including embedded) is type system
> agnostic (in fact is language agnostic).

Not that I (or anyone here) even stipulated that it would be gnostic
(I assume you mean "rely on" or "depend on"). If you go back some 6 or
10 posts you'll find that someone, I think it was Andrew Reilly sort
of implied that we can dispense with proof in verification and perhaps
with verification at all, because it has/is "not a guarantee of much
that is real" because errors might creep in to other sources (like the
setting up of the axioms). To which I replied that I'm (a) glad that
this attitude is, thankfully, not main stream thinking in the industry
which actually supplies software for safety critical systems and (b)
that I really really like to move in vehicles with verified software
as opposed to unverified software despite unusability of the method
postulated by AR. (Hint: There is a wide spectrum between certainty
(100% sure) and absolute insignificance: In the industry we call that
confidence and it's considered more than superstition).

Nowhere has been implied that (a) verification requires static typing
or (b) static typing catches all errors. 

But I now stipulate that the presence of static typing furthers
verification and indeed enforces program structures that are more
amenable to (a) review and (b) automatic and semiautomatic methods of
verification and proof.

And typing of course catches stupid little errors like wrecking the
software during maintenance (since this has a high probability of
breaking the type system too).

Since I like funny analogies: Typing is the little brother of
verification. It's built in for free in some languages.

BTW: You might be surprised: The most effective method of catching
errors is formal review: Not testing, not verification, not static
typing. This just for you jokers here that always try to play unit
testing against static typing. There is not either-or: Only combining
methods yields sufficient confidence and source quality.

(The "jokers" is due to the realization that talking rationally is
probably wasted to people like you).

> Notice that if you like your funny analogies. 

"If I like (my) funny analogies", then what -- again another sentence
I can't parse. You seem a bit over excited. I wonder whether you really
know/remember in which sub thread you're posting. That would certainly
explain your confusion about what has been claimed or not or said in
this sub thread.

> May you be the Keppler of programming?

Which "Keppler" do you mean
 
  http://en.wikipedia.org/wiki/Keppler
  http://de.wikipedia.org/wiki/Keppler

the SS officer, the war criminal, the ski champion or the theologian?
Probably the theologian ('cause I so firmly believe in static typing),
but I wouldn't put it above you to accuse me of crimes against
humanity for this ...


>> Still, until you've your great coming out, I'm still grateful that
>> train and air traffic control (as well as medical software) is
>> developed by organizations which understand about quantifying risk and
>> _have_ _actually_ decided to verify their software.
>
> Yes, which includes thorough walktroughs and testing.

I cannot imagine that invalidates the verification they do.

>> BTW: Perhaps you could ask your psychiatrist about how the Ariane 5
>> accident really came about. Your ideas about that are utterly to
>> simple, I noticed.
>
> No for this engineering would have sufficed: it lacked testing.

Interesting enough it lacked verification, i.e. checking the
specification against the implementation. Testing with simulated
Ariane 5 flight conditions should probably have revealed the error,
but so would have re-checking the assumptions that had been made when
checking the "re-alignment code" for the Ariane 4 for the new Ariane 5
environment.

> Talking about your needs for psychiatric assistance, don't forget to
> tell about your dithery as well: or the plonking above was a sissi
> threat?

Uuuuuh, ouuchh, uuuuuhhh. That certainly HURT, man. Terrible. I'm
totally destroyed, like.

No, actually I'm sometimes checking the filter sump, sometimes when
going back on a thread (there are still people answering to your
posts, even if it is only Rayiner, your partner in crime).

So long -- Markus
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9n96u$k0k$1@aioe.org>
Markus E.L. 2 escreveu:
> Cesar Rabak wrote:
> 
[snipped]
> 
> Fine. So we can dispense with your more sweeping over-generalization
> without the danger of being wrong in the future face of history.

Yes. It is a good start of your part! Let's see how we fare on this. . .

> 
>> here you're showing another trait of yours: technical ignorance and
>> recurringly making fuzzy citations.
> 
> Like I even made a citation. Looking back over the irrational dialog
> with you (that is: irrational at your side) I can't avoid the
> impression that you're actually crave to be called a troll -- or your
> language is indicative of a slight Tourette syndrome.

Well... this is expected: you don't like to be contradicted, right!? In 
fact, even after several posts where you wrote you've plonk me you 
cannot resist and keep answering...

> 
>> Validation for safety systems (including embedded) is type system
>> agnostic (in fact is language agnostic).
> 
> Not that I (or anyone here) even stipulated that it would be gnostic
> (I assume you mean "rely on" or "depend on"). If you go back some 6 or
> 10 posts you'll find that someone, I think it was Andrew Reilly sort
> of implied that we can dispense with proof in verification and perhaps
> with verification at all, because it has/is "not a guarantee of much
> that is real" because errors might creep in to other sources (like the
> setting up of the axioms). 

Right.

> To which I replied that I'm (a) glad that
> this attitude is, thankfully, not main stream thinking in the industry
> which actually supplies software for safety critical systems and (b)
> that I really really like to move in vehicles with verified software
> as opposed to unverified software despite unusability of the method
> postulated by AR. (Hint: There is a wide spectrum between certainty
> (100% sure) and absolute insignificance: In the industry we call that
> confidence and it's considered more than superstition).

The issue here is that in industry "Validation" and "Verification" don't 
have the meaning you're giving to. It has more to do with bureaucratic 
practices on following certain regulations and project guidelines than 
am mathematical proof (by which stretch we could want to give to it).

> 
> Nowhere has been implied that (a) verification requires static typing
> or (b) static typing catches all errors. 

Great! We approaching the industry experience.

> 
> But I now stipulate that the presence of static typing furthers
> verification and indeed enforces program structures that are more
> amenable to (a) review and (b) automatic and semiautomatic methods of
> verification and proof.

This stipulation still has the jury out. As has been said in this thread 
already once you attempt an [semi]automatic method of verification and 
proof you get back to square one in the way you specify (requirements) 
the proof.

Also, the present primitive types being too low level are not expressive 
enough for the larger systems we have economic interest on.

In fact the problem is more of strict type system (which dynamic 
languages have [like Lisp]) than discussion between static versus dynamic.

> 
> And typing of course catches stupid little errors like wrecking the
> software during maintenance (since this has a high probability of
> breaking the type system too).

OK. This is not an attribute of static systems only.

> 
> Since I like funny analogies: Typing is the little brother of
> verification. It's built in for free in some languages.

Again, dynamic languages have typing and can benefit of it as well.

> 
> BTW: You might be surprised: The most effective method of catching
> errors is formal review: Not testing, not verification, not static
> typing. This just for you jokers here that always try to play unit
> testing against static typing. There is not either-or: Only combining
> methods yields sufficient confidence and source quality.

Yes, formal review is very strong to catch errors specially due our 
imperfect way of specifying completely systems. Also it is type system 
agnostic.

> 
> (The "jokers" is due to the realization that talking rationally is
> probably wasted to people like you).

Don't waste your time with me then! Be consistent with your arrogance!
> 
>> Notice that if you like your funny analogies. 
> 
> "If I like (my) funny analogies", then what -- again another sentence
> I can't parse. You seem a bit over excited. 

I see: you love to tease others and then get stuck when others play your 
own game...

> I wonder wether you really
> know/remember in which sub thread you're posting. That would certainly
> explain your confusion about what has been claimed or not or said in
> this sub thread.
> 
>> May you be the Keppler of programming?
> 
> Which "Keppler" do you mean
>  
>   http://en.wikipedia.org/wiki/Keppler
>   http://de.wikipedia.org/wiki/Keppler
> 
> the SS officer, the war criminal, the ski champion or the theologian?
> Probably the theologian ('cause I so firmly believe in static typing),
> but I wouldn't out it above you to accuse me of crimes against
> humanity for this ...

And you write I'm excited! You must be stoned then!

> 
>>> Still, until you've your great coming out, I'm still grateful that
>>> train and air traffic control (as well as medical software) is
>>> developed by organizations which understand about quantifying risk and
>>> _have_ _actually_ decided to verify their software.
>> Yes, which includes thorough walktroughs and testing.
> 
> I cannot imagine that invalidates the verification they do.
> 
>>> BTW: Perhaps you could ask your psychiatrist about how the Ariane 5
>>> accident really came about. Your ideas about that are utterly to
>>> simple, I noticed.
>> No for this engineering would have sufficed: it lacked testing.
> 
> Interesting enough it lacked verification, i.e. checking the
> specification against the implementation. Testing with simulated
> Ariane 5 flight conditions should probably have revealed the error,
> but so would have re-checking the assumptions that had been made when
> checking the "re-alignment code" for the Ariane 4 for the new Ariane 5
> environment

Nice you mention this, because it is exactly what I'm trying to point out: 
the "verification" and "validation" processes in practice in industry 
are more of having comfort that certain formalities are being through. 
In some specific part of the process a part 'convinced' another that a 
certain module was pre-tested and to catch up due dates spare some 
monies the tests/walktroughs, weren't made.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bhf6ulls4ssfe@corp.supernews.com>
Andrew Reilly wrote:
> The claim that is mostly being made is that 
> the relationship between formal systems capable of supporting proofs and
> informal systems capable of being tested is much the same as that between
> mathematics and the observable universe.  Close and useful/beautiful, but
> not a guarantee of much that is real.
> 
> And a proof only holds within the set of axioms that defines it.  The real
> world can be curlier than that.  Sometimes being unable to disprove a
> theory is as good as you can reasonably get.

Just as mathematics is not used by physicists because it is useless in
practice?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andrew Reilly
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <pan.2007.08.07.23.38.57.434376@areilly.bpc-users.org>
On Tue, 07 Aug 2007 19:32:47 +0100, Jon Harrop wrote:

>> And a proof only holds within the set of axioms that defines it.  The real
>> world can be curlier than that.  Sometimes being unable to disprove a
>> theory is as good as you can reasonably get.
> 
> Just as mathematics is not used by physicists because it is useless in
> practice?

It's not useless, it just isn't the final word.  The final word is
checking to see whether the universe behaves the way you predicted in the
theory that you derived by mathematically extrapolating from the known
axioms/theories. By experiment. By testing the theory.

The universe exists independently of the mathematics we use to describe it.

-- 
Andrew
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bk5jc1clsog26@corp.supernews.com>
Andrew Reilly wrote:
> On Tue, 07 Aug 2007 19:32:47 +0100, Jon Harrop wrote:
>> Just as mathematics is not used by physicists because it is useless in
>> practice?
> 
> It's not useless, it just isn't the final word.  The final word is
> checking to see whether the universe behaves the way you predicted in the
> theory...

No, that isn't the final word either and this is exactly the point. Just
because your theory happens to match your observations does not mean it is
correct. Just as software passing tests is not necessarily correct.

Ideally, you want both verification and testing. This is why we all do both
static analysis and testing.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f97sgs$82t$1@news.xmission.com>
Rayiner Hashem wrote:

> [...] No physical
> law can be proven to be correct, they are all "merely" validated by
> empirical observation. [...]

What about the concept of empirical verifiability itself? Can that be 
empirically verified?

:-)

-thant
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186466808.080518.198440@o61g2000hsh.googlegroups.com>
On 6 Aug., 21:24, Thant Tessman <····@standarddeviance.com> wrote:
> Rayiner Hashem wrote:
> > [...] No physical
> > law can be proven to be correct, they are all "merely" validated by
> > empirical observation. [...]
>
> What about the concept of empirical verifiability itself? Can that be
> empirically verified?
>
> :-)

No. The law of cause and effect is biologically engrained in our
brains and we have to assume it (consciously or not) a priori.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <8dir7r1g0s.fsf@hod.lan.m-e-leypold.de>
Ingo Menger wrote:

> On 6 Aug., 21:24, Thant Tessman <····@standarddeviance.com> wrote:
>> Rayiner Hashem wrote:
>> > [...] No physical
>> > law can be proven to be correct, they are all "merely" validated by
>> > empirical observation. [...]
>>
>> What about the concept of empirical verifiability itself? Can that be
>> empirically verified?
>>
>> :-)
>
> No. The law of cause and effect is biologically engrained in our
> brains and we have to assume it (consciously or not) a priori.

Actually it has been empirically verified (for the macroscopic level)
by the very fact that we are here (were evolutionary successful).

Regards -- Markus
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <c1k5sa7hb4.fsf@hod.lan.m-e-leypold.de>
Frank Buss wrote:

> Rayiner Hashem wrote:
>
>> Yet, physics, biology, chemistry, and engineering manage to get by
>> just fine with empirical observation. Planes, bridges, and clones get
>> created without ever being rigorously type-checked...
>
> The "type system", i.e. constraints of engineering are the physical laws.
> Lisp is more like art, without much constraints. Both can be useful.


Indeed that is not quite right: In natural sciences physical units
often assume the role of a type system.

-- markus
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186417274.314674.291630@w3g2000hsg.googlegroups.com>
> Indeed that is not quite right: In natural sciences physical units
> often assume the role of a type system.

Indeed, a trivial, unexpressive one, much like existing static type
systems...
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bhf12i81os2fd@corp.supernews.com>
Frank Buss wrote:
> The "type system", i.e. constraints of engineering are the physical laws.
> Lisp is more like art, without much constraints. Both can be useful.

I see Lispers as more akin to Biologists, i.e. they're the programmers who
can't do maths and don't understand proof.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <pohcnbgo2q.fsf@hod.lan.m-e-leypold.de>
Jon Harrop wrote:

> Frank Buss wrote:
>> The "type system", i.e. constraints of engineering are the physical laws.
>> Lisp is more like art, without much constraints. Both can be useful.
>
> I see Lispers as more akin to Biologists, i.e. they're the programmers who
> can't do maths and don't understand proof.

Boo, Jon. That certainly won't help deescalation. I'm used to better
things from you.

- M
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bk2anfe7tqcbb@corp.supernews.com>
Markus E.L. 2 wrote:
> Jon Harrop wrote:
>> Frank Buss wrote:
>>> The "type system", i.e. constraints of engineering are the physical
>>> laws. Lisp is more like art, without much constraints. Both can be
>>> useful.
>>
>> I see Lispers as more akin to Biologists, i.e. they're the programmers
>> who can't do maths and don't understand proof.
> 
> Boo, Jon. That certainly won't help deescalation. I'm used to better
> things from you.

Objectively, look at the number of people confusing proof with testing in
this thread and look at the proportion of posts about mathematics on the
Haskell, OCaml and Lisp forums.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186523672.749164.239890@q75g2000hsh.googlegroups.com>
On Aug 7, 2:29 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> Frank Buss wrote:
> > The "type system", i.e. constraints of engineering are the physical laws.
> > Lisp is more like art, without much constraints. Both can be useful.
>
> I see Lispers as more akin to Biologists, i.e. they're the programmers who
> can't do maths and don't understand proof.

Proofs are for mathematicians. Science is the domain of the
fundamentally unprovable. No scientific theory is anything other than
a model for predicting behavior. In certain cases, these models are
believed to be concrete descriptions of the underlying reality, and in
many cases they are known to be mere approximations of the underlying
reality, but none are anything approaching a proof.

Since you mentioned science and physics, I thought I'd point out
something ironic in the context of this discussion. The programming
language most often used by scientists and engineers is dynamically-
typed in the extreme: Matlab.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bk59hkmlk4d1d@corp.supernews.com>
Rayiner Hashem wrote:
> Proofs are for mathematicians. Science is the domain of the
> fundamentally improvable. No scientific theory is anything other than
> a model for predicting behavior. In certain cases, these models are
> believed to be concrete descriptions of the underlying reality,

Science is not really about our "underlying reality".

> and in 
> many cases they are known to be mere approximations of the underlying
> reality,

Yes. Although there is nothing "mere" about approximations. We could never
have built the technology that we have without making approximations.

> but none are anything approaching a proof. 

Science is largely about constructing mathematical models of the natural
world and using them to make predictions.

You seem to be trying to drive a wedge between science and mathematics/proof
but that is fruitless. Mathematical proofs are a core part of all
scientific disciplines, whether it is drug absorption rates in
pharmacokinetics or photon pressure from stars in astrophysics.

For example, I tried to derive the properties of a new wavelet by hand in
chapter 3 of my PhD thesis. I even plugged a few numbers in and checked that
the results were reasonable. However, I had little faith in the results
until I had automated the symbolic derivation with the help of Mathematica.

I apply the same approach to programming by leveraging static type systems.
One day, I hope to master theorem provers so that I can apply them to my
programs as well.

> Since you mentioned science and physics, I thought I'd point out
> something ironic in the context of this discussion. The programming
> language most often used by scientists and engineers is dynamically-
> typed in the extreme: Matlab.

I have no idea what gave you the idea that Matlab is the most popular
programming language among scientists. Most scientists use Fortran, C, C++,
Java and even Perl/Python.

Matlab might well be the most popular proprietary language used by engineers
though.

You may also be interested to know that Matlab is partly written in OCaml...

-- 
Dr Jon D Harrop BA MA MSci PhD (Cantab), Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186620534.044863.260600@o61g2000hsh.googlegroups.com>
> Science is not really about our "underlying reality".

Science is useless without a connection to the underlying reality. A
mathematical system is correct and possibly even useful even if the
axioms bear no resemblance to facts observed in reality, but a
"science" whose "laws" bear no resemblance to empirical observation is
useless, and not even a science.

> Science is largely about constructing mathematical models of the natural
> world and using them to make predictions.

Right, but this has what to do with proof?

> Mathematical proofs are a core part of all
> scientific disciplines, whether it is drug absorption rates in
> pharmacokinetics or photon pressure from stars in astrophysics.

I suppose you could make an argument that the derivation of a result
from more basic physical "laws" is analogous to the process of proving
a theorem from fundamental axioms. I think this analogy has some
validity, say if you're talking about the derivation of the equation
for electron tunneling from the wave equation. But what is the
analogue of deriving tractable equations for physical systems by
making a bunch of assumptions and throwing away all the higher-order
terms?

Bringing the question back to reality, you have to consider: what is
the value of a proof (or derivation). A proof or derivation tells you
that something is true within the context of the axioms and
assumptions from which that proof was derived. One can consider a
proof of a program property with respect to a type model to be useful,
because we believe that type model to reflect some truth about the
world. What is the value of a derivation when we have limited
confidence that the laws from which they were derived reflect
something real? In the context of most engineering fields, this value
is reflected in the fact that theoretical results are multiplied by
some factor to guarantee real-world safety, and even then subject to
extensive testing to verify that safety!

> I have no idea what gave you the idea that Matlab is the most popular
> programming language among scientists. Most scientists use Fortran, C, C++,
> Java and even Perl/Python.

I suppose it depends on where you draw the line between scientists and
engineers. Most of the people I know working in engineering or
engineering sciences know and use Matlab, very few know and use C or
FORTRAN. Matlab gets tremendous use for prototyping algorithms/doing
data analysis.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bmh7bmrirtpe0@corp.supernews.com>
Rayiner Hashem wrote:
>> Science is not really about our "underlying reality".
> 
> Science is useless without a connection to the underlying reality. A 
> mathematical system is correct and possibly even useful even if the
> axioms bear no resemblance to facts observed in reality, but a
> "science" whose "laws" bear no resemblance to empirical observation is
> useless, and not even a science.

For centuries, Newtonian mechanics were thought to represent the underlying
reality of our universe. Observations in the early 1900s showed this to be
wrong: Newtonian mechanics is an approximation.

However, finding out that Newtonian mechanics was "wrong" did not make
it useless. Newtonian mechanics are still far more widely used to create
new technology than general relativity.

>> Science is largely about constructing mathematical models of the natural
>> world and using them to make predictions.
> 
> Right, but this has what to do with proof?

The mathematical models are composed of proofs.

>> I have no idea what gave you the idea that Matlab is the most popular
>> programming language among scientists. Most scientists use Fortran, C,
>> C++, Java and even Perl/Python.
> 
> I suppose it depends on where you draw the line between scientists and
> engineers. Most of the people I know working in engineering or
> engineering sciences know and use Matlab, very few know and use C or
> FORTRAN. Matlab gets tremendous use for prototyping algorithms/doing
> data analysis.

I'm not sure what you mean by "engineering science" but I know dozens of
research scientists and none are Matlab users. I have known only three
Matlab users and all were engineers (one is now a computer scientist).

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9qh58$1cs$1@news.xmission.com>
Jon Harrop wrote:
> Rayiner Hashem wrote:
>>> Science is not really about our "underlying reality".
>> Science is useless without a connection to the underlying reality. A 
>> mathematical system is correct and possibly even useful even if the
>> axioms bear no resemblance to facts observed in reality, but a
>> "science" whose "laws" bear no resemblance to empirical observation is
>> useless, and not even a science.
> 
> For centuries, Newtonian mechanics were thought to represent the underlying
> reality of our universe. Observations in the early 1900s showed this to be
> wrong: Newtonian mechanics is an approximation. [...]

Actually, this is unfair to Newtonian mechanics. Newton wrote his 
equations in terms of momentum, thus avoiding the assumption that mass 
was constant. And he made explicit his assumption that time was the 
'same' everywhere. He clearly entertained the notion that he could be 
wrong about that.

Science is the assumption that the world around us has structure that is 
amenable to discovery through our intellect. It's not about prediction 
and it's not about technology. These are consequences. They follow from 
science, but they are not what science is. Mathematics is not science 
either, but it *is* the language of science. The important point about 
math is not proofs as such, but logical consistency.

For example, we observe that (absent other forces such as wind 
resistance) objects fall in a gravitational field at a rate independent 
of their weight. This is not merely an empirical fact. Logic demands it 
be so. Imagine tying a hammer to one end of a rope and an anvil to the 
other, and dropping both from the roof of a tall building. If lighter 
objects fell more slowly, wouldn't the hammer slow down the anvil? What 
if we instead used the rope to bind the objects tightly together? 
Wouldn't the combined weight of both objects now cause this new bigger 
object to fall faster?

That things fall is empirical. The nature of the motion, however, is 
characterized by an "underlying reality" that we can perceive only 
indirectly through rational analysis. We call this rational analysis 
'science.'

A type system is a structure imposed on a computer program in such a way 
that a machine can check for logical consistency. This, like science, 
has some positive consequences: fewer bugs and better performance. But 
our understanding of type systems, like our understanding of 
bridge-building, is still advancing and will never be complete. I don't 
think there is any deeper moral lesson than this to be teased out of any 
strained parallel between computer programming and science versus 
engineering.

-thant
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187041980.564628.230390@g4g2000hsf.googlegroups.com>
> Science is the assumption that the world around us has structure that is
> amenable to discovery through our intellect. It's not about prediction
> and it's not about technology. These are consequences. They follow from
> science, but they are not what science is.

This is a vast over-simplification of what is a large and complex
debate. There are a lot of camps, and more than one would reject the
idea that the structure of the world can be discovered through
intellect (which smacks of rationalism to me).

At a very real level, intellect can tell you nothing about the world
as it is, merely about the world as it might be. Actually, empirical
testing cannot tell you anything about the world as it is either, but
can at least tell you something about the world as you perceive it to
be.

> For example, we observe that (absent other forces such as wind
> resistance)

If the aim is to ascertain truth, what is the point of starting from a
premise we know to be false? If the aim is to establish useful
knowledge, then such reasoning can be justified, based on experimental
evidence suggesting that the premise might be "close enough" to
reality to allow the reasoning to lead to useful results.

Ultimately, we cannot ascertain truths about the world as it is. We
may derive truths about the world as it may be, but all such
derivations are inherently grounded in assumptions --- assumptions
which, for the results of the derivation to be at all relevant, must
be rooted in empirical evidence.
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9qmc7$tut$1@news.xmission.com>
Rayiner Hashem wrote:
>> Science is the assumption that the world around us has structure that is
>> amenable to discovery through our intellect. It's not about prediction
>> and it's not about technology. These are consequences. They follow from
>> science, but they are not what science is.
> 
> This is a vast over-simplification of what is a large and complex
> debate. There are a lot of camps, and more than one would reject the
> idea that the structure of the world can be discovered through
> intellect (which smacks of rationalism to me).

You say that like it's a bad thing. :-)

I do know that (for some psychological reason I'm just beginning to 
understand) rationalism annoys the hell out of a lot of people, but as I 
hinted at elsewhere in this never-ending thread, empiricism--at least in 
its purest form--is trivially refuted. All you have to do is ask 
yourself: Is the principle of empirical falsifiability itself 
empirically falsifiable?

[...]

-thant
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187054460.237792.77850@l70g2000hse.googlegroups.com>
> rationalism annoys the hell out of a lot of people

Among others, David Hume...

To be snarky, one can say that modern philosophy has obsoleted
rationalism in much the same way as modern physics has obsoleted
newtonian mechanics...

> hinted at elsewhere in this never-ending thread, empiricism--at least in
> its purest form--is trivially refuted. All you have to do is ask
> yourself: Is the principle of empirical falsifiability itself
> empirically falsifiable?

The inability to falsify the empirical method does not bother
empiricists so much, but it seems to trouble rationalists a great
deal.

Ultimately, no method can give you truth. Both give you "truth like
things", contingent either on axioms in the case of rationalist
methods, or the limitations of experiment and perception, in the case
of empirical methods. Any hybrid (laws based on experiment, prediction
based on derivation), is saddled with the flaws of both. Given this
sad state of affairs, setting a lower bar for truth (it was "true" the
last 10 times I tested it) does not strike empiricists as such a bad
idea.
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9r510$esl$1@news.xmission.com>
Rayiner Hashem wrote:

>> rationalism annoys the hell out of a lot of people
> 
> Among others, David Hume...

David Hume could out-consume Schopenhauer and Hegel.


> The inability to falsify the empirical method does not bother
> empiricists so much, but it seems to trouble rationalists a great
> deal. [...]

...which is part of what clued me into the fact that for empiricists, 
the ability to so casually brush off a logical conundrum (nay, viciously 
ignore it) is a psychological phenomenon.

-thant
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c1mlbcn16lmce@corp.supernews.com>
Thant Tessman wrote:
> Jon Harrop wrote:
>> For centuries, Newtonian mechanics were thought to represent the
>> underlying reality of our universe. Observations in the early 1900s
>> showed this to be wrong: Newtonian mechanics is an approximation. [...]
> 
> Actually, this is unfair to Newtonian mechanics. Newton wrote his
> equations in terms of momentum, thus avoiding the assumption that mass
> was constant. And he made explicit his assumption that time was the
> 'same' everywhere. He clearly entertained the notion that he could be
> wrong about that.

He also made explicit his assumption of a static universe, IIRC.

> Science is the assumption that the world around us has structure that is
> amenable to discovery through our intellect. It's not about prediction
> and it's not about technology.

The ancient Greeks tried that thousands of years ago and it turned out to be
very limited in utility.

Prediction has been an integral part of science for hundreds of years now.
We invent falsifiable predictions formulated as hypotheses and test them
experimentally. That's how all modern scientific theories evolve.

> A type system is a structure imposed on a computer program in such a way
> that a machine can check for logical consistency. This, like science,
> has some positive consequences: fewer bugs and better performance. But
> our understanding of type systems, like our understanding of
> bridge-building, is still advancing and will never be complete. I don't
> think there is any deeper moral lesson than this to be teased out of any
> strained parallel between computer programming and science versus
> engineering.

I think that is a good summary.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9qn2g$5m3$1@news.xmission.com>
Jon Harrop wrote:
> Thant Tessman wrote:

[...]

>> Science is the assumption that the world around us has structure that is
>> amenable to discovery through our intellect. It's not about prediction
>> and it's not about technology.
> 
> The ancient Greeks tried that thousands of years ago and it turned out to be
> very limited in utility.

[...]

I think you think I'm saying something I didn't actually say. Maybe it 
sounded too much like something you read somewhere else.

-thant
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c20u759arv3ec@corp.supernews.com>
Thant Tessman wrote:
> I think you think I'm saying something I didn't actually say. Maybe it
> sounded too much like something you read somewhere else.

I was responding to "science ... is not about prediction".

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9r6gf$u3k$1@news.xmission.com>
Jon Harrop wrote:
> Thant Tessman wrote:
>> I think you think I'm saying something I didn't actually say. Maybe it
>> sounded too much like something you read somewhere else.
> 
> I was responding to "science ... is not about prediction".

Science is a dialog with the universe. This implies that it is also 
about prediction, but that would be missing the point.

-thant
From: Kaz Kylheku
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187122851.320974.148780@d55g2000hsg.googlegroups.com>
On Aug 13, 2:06 pm, Thant Tessman <····@standarddeviance.com> wrote:
> A type system is a structure imposed on a computer program in such a way
> that a machine can check for logical consistency.

This presents an incomplete, biased view of the role of types in
programming.  Types arise because of the flexibility of representation
within the computing machine. A given group of binary digits in memory
represents information by some rules which govern its interpretation,
and the set of permissible operations. These conventions together make
up what we call type. Type tells us that some group of bits is a
floating-point number, pointer to a linked list, integer, a UNICODE
character, etc. The type information may be implicit: we are confident
that some group of bits is an integer, because from the structure of
the program it appears that the data originated from a function that
reliably constructs only integers. Or the type information could be
encoded with a few bits in the data item itself.

In any case, we do not, for instance, have both character strings and
integers in a programming language merely so that we check a program
for logical consistency. The data held by character strings could also
be arithmetically coded in integers, which could indeed lead to
certain undetected errors whereby some inappropriate integer operation
is applied that doesn't make sense over the encoded character string
data. But that isn't the fundamental reason why we have a character
string data type. We have such a data type for representational
convenience and clarity. Additionally, because we have these types, we
can also perform error checking: we can have operations which are only
appropriate for integers and those which are only appropriate for
strings, and verify that they operate on the expected data type.

Type checking is a necessary consequence of the flexibility of the
machine to use the same storage to represent anything that we want,
due to the errors which become possible thanks to that flexibility.
However, the drama of data typing is played out even when there is no
error checking. Data types still exist in machine language programs,
where a misinterpretation of an object by an incorrect operation is a
silent error.

Type information is not only used for error checking. It's also used,
at run time or compile time, to select an appropriate operation
(polymorphism). Polymorphism is useful independently of the presence
of error checking.

Moreover, the type information in a program isn't necessarily
restricted by the boundaries of the type system of the programming
language. Any enumeration value which is used to classify an object,
or some member part of it, is also type information.

> This, like science,
> has some positive consequences: fewer bugs and better performance.

When the programming language has an inflexible type system (for
instance, lacking adequate run-time typing facilities) then
programmers quite easily compensate for that in their programs by
inventing their own type information, encoded in the domain values of
the available data types, such as integers, enumerated types, symbols
and strings. The resulting program may pass the strict error checks
imposed by the programming language implementation, yet be full of
type errors within its own ad-hoc type system. That ad-hoc type system
may also be poorly optimized, dragging down the performance of the
program.

So, in short, the view that statically checked type systems banish
errors and improve performance is somewhat naive and myopic.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xmywtu9p3.fsf@ruckus.brouhaha.com>
Kaz Kylheku <········@gmail.com> writes:
> This presents an incomplete, biased view of the role of types in
> programming.  Types arise because of the flexibility of representation
> within the computing machine. A given group of binary digits in memory

No I don't think so, at least in this context.  Type theory and typed
lambda calculus arose in mathematical logic long before there was such
a thing as a digital computer.  Later on, the construction was found
to be useful in programming, so type mechanisms were grafted onto
languages.  Nowadays, many languages are designed around type systems
rather than the other way around.
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9t6f1$2qc$1@news.xmission.com>
Kaz Kylheku wrote:
> On Aug 13, 2:06 pm, Thant Tessman <····@standarddeviance.com> wrote:
>> A type system is a structure imposed on a computer program in such a way
>> that a machine can check for logical consistency.
> 
> This presents an incomplete, biased view of the role of types in
> programming.  [...]

There is a difference between "types" and a "type system." The latter is 
exactly what I said it is.

-thant
From: Rob Warnock
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <-eOdnWEYuJ4xwl_bnZ2dnUVZ_rignZ2d@speakeasy.net>
Kaz Kylheku  <········@gmail.com> wrote:
+---------------
| On Aug 13, 2:06 pm, Thant Tessman <····@standarddeviance.com> wrote:
| > A type system is a structure imposed on a computer program in such a way
| > that a machine can check for logical consistency.
| 
| This presents an incomplete, biased view of the role of types in
| programming.  Types arise because of the flexibility of representation
| within the computing machine. A given group of binary digits in memory
| represents information by some rules which govern its interpretation,
| and the set of permissible operations. These conventions together make
| up what we call type. Type tells us that some group of bits is a
| floating-point number, pointer to a linked list, integer, a UNICODE
| character, etc. The type information may be implicit: we are confident
| that some group of bits is an integer, because from the structure of
| the program it appears that the data originated from a function that
| reliably constructs only integers. Or the type information could be
| encoded with a few bits in the data item itself.
+---------------

To quote myself a bit from some years before
<···························@speakeasy.net>:

[In] the BLISS language data is completely *untyped*, it is
instead the *operators* which are typed, exactly as it is in
assembler languages. Thus BLISS's "+" is typed (int x int) -> int,
while BLISS's FADR (Floating ADd and Round) operator is typed
(float x float) -> float. The following code is a legal BLISS
expression[1], though probably not something anyone would want
to do very often:

    begin local a, b;
      a := 1.0 fadr 1.0;    ! that is, 2.0
      b := .a + 1;
      .b fsbr .a            ! fsbr is floating subtraction
    end

On a machine with IEEE floating point, that block should yield a
value of roughly 2.38e-07...  ;-}
...
Oddly enough, practical experience in BLISS showed[2] that
"type errors" were one of the *least* common sources of programmer
error in BLISS code. Much more common were misplaced/missing/extra
dots (the "contents-of" operator) and semicolons (which in BLISS
are expression *separators*, not statement terminators).
...
[2] I *think* the following paper may be where this was reported,
    but I'm not completely sure:

	Wulf, W. A., et al., "Reflections on a Systems Programming
	Language," Proceedings of the SIGPLAN Symposium on System
	Implementation Languages, Purdue University, October 1971.

    It may have been here:

	Wulf, W. A., "Systems for Systems Implementors: Some Experiences
	from Bliss," Proceedings of the FJCC, November 1972. 

    Or somewhere else entirely (such as a paper called "Why the dot?",
    which I can't find a reference to at the moment)...


-Rob

-----
Rob Warnock			<····@rpw3.org>
627 26th Avenue			<URL:http://rpw3.org/>
San Mateo, CA 94403		(650)572-2607
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186381656.036855.156890@o61g2000hsh.googlegroups.com>
On 4 Aug., 16:32, Rayiner Hashem <·······@gmail.com> wrote:
> > Look, I don't advocate the exclusion of any tests. But the bad thing
> > about tests is that they are the analogon to empirical observations.
>
> Yet, physics, biology, chemistry, and engineering manage to get by
> just fine with empirical observation. Planes, bridges, and clones get
> created without ever being rigorously type-checked...

That may be the reason why bridges still break down from time to time.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186422938.737335.111200@l70g2000hse.googlegroups.com>
> That may be the reason why bridges still break down from time to time.

When the reliability of software in statically-typed languages
approaches the failure-rate of engineering structures like bridges,
then we can have this discussion.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bhetop520cjfc@corp.supernews.com>
Rayiner Hashem wrote:
> Yet, physics, biology, chemistry, and engineering manage to get by
> just fine with empirical observation. Planes, bridges, and clones get
> created without ever being rigorously type-checked...

This reminds me of a post I read elsewhere stating that Biology is the only
science devoted to the natural world. According to the author, physics was
about unnatural things, like fundamental particles and the sun.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <oavebu7hrj.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Ingo Menger escreveu:
>> On 3 Aug., 15:34, Cesar Rabak <·······@yahoo.com.br> wrote:
>>> Ingo Menger escreveu:
>>>
>>>
>>>
>>>> On 3 Aug., 03:09, Cesar Rabak <·······@yahoo.com.br> wrote:
>>>>> Jon Harrop escreveu:
>>>>> [snipped]
>>>>>> So when you say "you're going to write a test suite" you are assuming that
>>>>>> the test suite would be the same for a dynamic or static program, which is
>>>>>> not correct.
>>>>> This looks like a non sense to me. Test should prove that business
>>>>> requirements are met or no.
>>>> But then, you don't catch type errors at all, as has been claimed here
>>>> earlier.
>>>> I can easily test that a function computes the annualized interest or
>>>> whatever given an amount and an interest rate. But there is no
>>>> guarantee, that this function will never be called with arguments of a
>>>> wrong type. As Jon Harrop correctly pointed out, the type system does
>>>> just that (among other things) and reliefs you from caring about the
>>>> cases you don't think about in your wildest horror dreams.
>>> I think you miss the whole point of unit tests here: _if_ in the
>>> particular technology you're using this is a real _business_ risk, you
>>> put one or two tests for this scenario and be sure it comes out green.
>> Sorry, but I have the impression that you don't understand.
>> How do you know if it is a *business* risk?
>> Or better, when is calling a function with wrong arguments *not a
>> business* risk? Perhaps when the application that does that has no
>> relevance whatsoever.
>
> It is not a business risk if the application is designed so that the
> wrong type will not happen by design. Or we talking about an app that

"Wrong type will not happen by design" is exactly what I call typing
"manually": Checking that design is what a type system does for you --
why doing it yourself?

Regards -- Markus
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9600p$r29$1@aioe.org>
Markus E.L. 2 escreveu:
> Cesar Rabak wrote:
> 
>> Ingo Menger escreveu:
[snipped]
>>> Sorry, but I have the impression that you don't understand.
>>> How do you know if it is a *business* risk?
>>> Or better, when is calling a function with wrong arguments *not a
>>> business* risk? Perhaps when the application that does that has no
>>> relevance whatsoever.
>> It is not a business risk if the application is designed so that the
>> wrong type will not happen by design. Or we talking about an app that
> 
> "Wrong type will not happen by design" is exactly what I call typing
> "manually": Checking that design is what a type system does for you --
> why doing it yourself?
> 

Because in an static type system I have too early to choose a specific 
representation of an abstract type I'm evolving in the design whose type 
I could change as I test (perhaps even deploy earlier versions of the 
program).
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <d8wsw8is62.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Markus E.L. 2 escreveu:
>> Cesar Rabak wrote:
>>
>>> Ingo Menger escreveu:
> [snipped]
>>>> Sorry, but I have the impression that you don't understand.
>>>> How do you know if it is a *business* risk?
>>>> Or better, when is calling a function with wrong arguments *not a
>>>> business* risk? Perhaps when the application that does that has no
>>>> relevance whatsoever.
>>> It is not a business risk if the application is designed so that the
>>> wrong type will not happen by design. Or we talking about an app that
>> "Wrong type will not happen by design" is exactly what I call typing
>> "manually": Checking that design is what a type system does for you --
>> why doing it yourself?
>>
>
> Because in an static type system I have too early to choose a specific
> representation of an abstract type I'm evolving in the design whose
> type I could change as I test (perhaps even deploy earlier versions of
> the program).

Static typing doesn't preclude you from changing type representations
nor adding attributes to types. It doesn't even preclude you from
swapping out a type completely, and typing (and statically scoped
binding I should say) actually helps you to do that.

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bheppoothutfb@corp.supernews.com>
Cesar Rabak wrote:
> Because in an static type system I have too early to choose a specific
> representation of an abstract type...

That has everything to do with abstraction and almost nothing to do with
static typing.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9gcb0$dea$1@aioe.org>
Jon Harrop escreveu:
> Cesar Rabak wrote:
>> Because in an static type system I have too early to choose a specific
>> representation of an abstract type...
> 
> That has everything to do with abstraction and almost nothing to do with
> static typing.
> 
NO. This has everything to do with a "type" system that needs too early 
to decide that something "Number" has to be Int or Float (or whatever) 
and if divisions like 2/3 are truncated in former or rounded in later.

For example, but you get the picture.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <bb3aysdw06.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Jon Harrop escreveu:
>> Cesar Rabak wrote:
>>> Because in an static type system I have too early to choose a specific
>>> representation of an abstract type...
>> That has everything to do with abstraction and almost nothing to do
>> with
>> static typing.
>>
> NO. This has everything to do with a "type" system that needs too
> early to decide that something "Number" has to be Int or Float (or
> whatever) and if divisions like 2/3 are truncated in former or rounded
> in later.
>
> For example, but you get the picture.


No, we don't. Oh, perhaps we do, but what we get has nothing to do
with type systems and compilers ...

-- M.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <p7odhm7heg.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> No type system will be enough because during a non toy program you'll
> need to convert data from a format to another and/or  (in languages
> that allow that) coerce data from a type to another, etc. Making in
> large applications all this comfort about type checking less important
> on the whole.

Again you completely fail to grasp how type systems are used.

let my_date (* of type date ! *) = make_date(2007,12,24)
in 
  ...
   
How is that not enough? The salient point is: Types are assigned to
locations in the program (therefore the name "static"). There is only
a finite number of types at compile time (as opposed to
values/bindings at run time). Data might be (indeed often is)
converted dynamically between types. Having static typing doesn't mean
the compiler has to prove that the user will ever only input numbers
into the program. Indeed it can't: The user always inputs strings and
dynamic parsing and conversion will have to occur. Still this is
happening at one place in the program:

let input ()
    ...

    try
       let my_thingy = parse_user_input(field1 (* this is  a string *) )
       in 
          yaddayadda my_thingy (* this is of type thing *)
    with 
       Parse_error -> pop_up ("sorry this is not a valid thingy specification"); input ()
;;


See the point? Well defined types, error handling at conversion time,
no need to restrict the user input.



Regards -- Markus
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f960n8$t1a$1@aioe.org>
Markus E.L. 2 escreveu:
> 
> Cesar Rabak wrote:
> 
>> No type system will be enough because during a non toy program you'll
>> need to convert data from a format to another and/or  (in languages
>> that allow that) coerce data from a type to another, etc. Making in
>> large applications all this comfort about type checking less important
>> on the whole.
> 
> Again you completely fail to grasp how type systems are used.
> 
> let my_date (* of type date ! *) = make_date(2007,12,24)
> in 
>   ...
>    
> How is that not enough? The salient point is: Types are assigned to

NO, it is not enough. The issues I had in mind were more along the lines of 
the ones that led to the Ariane catastrophe.

They used IIRC Ada and a conversion from some 32 to 16 bits led to an 
exception which ultimately crashed the rocket.

> a finite number of types at compile time (as opposed to
> values/bindings at run time). Data might be (indeed often is)
> converted dynamically between types. Having static typing doesn't mean
> the compiler has to prove that the user will ever only input numbers
> into the program. Indeed it can't: The user always inputs strings and
> dynamic parsing and conversion will have to occur. Still this is
> happening at one place in the program:
> 
> let input ()
>     ...
> 
>     try
>        let my_thingy = parse_user_input(field1 (* this is  a string *) )
>        in 
>           yaddayadda my_thingy (* this is of type thing *)
>     with 
>        Parse_error -> pop_up ("sorry this is not a valid thingy specification"); input ()
> ;;
> 
> 
> See the point? Well defined types, error handling at conversion time,
> no need to restrict the user input.
> 
So we're back to square zero: it is the programmer that will have to 
write to a specific application the code above, right? It is the same we 
do in Smalltalk, Lisp, etc.

This will only pop at runtime also, like a dynamic language, so for the 
user very little would change, right?
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <aysl6wirjr.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Markus E.L. 2 escreveu:
>> Cesar Rabak wrote:
>>
>>> No type system will be enough because during a non toy program you'll
>>> need to convert data from a format to another and/or  (in languages
>>> that allow that) coerce data from a type to another, etc. Making in
>>> large applications all this comfort about type checking less important
>>> on the whole.
>> Again you completely fail to grasp how type systems are used.
>> let my_date (* of type date ! *) = make_date(2007,12,24)
>> in   ...
>>    How is that not enough? The salient point is: Types are assigned
>> to
>
> NO, it is not enough. The issues I'd in mind were more in the lines
> the ones led to the Arianne's catastrophe.
>
> They used IIRC Ada and a conversion from some 32 to 16 bits led to an
> exception which ultimately crashed the rocket.

No, dynamic testing switched off in code reused from Ariane 4 which
had not been verified for the new environment led to an integer
overflow which promptly destabilized the attitude control. After
trying in vain to correct the (suddenly perceived) wrong inclination
of the rocket, the rocket destroyed itself.

Ada actually is statically typed, BTW. 




>
>> a finite number of types at compile time (as opposed to
>> values/bindings at run time). Data might be (indeed often is)
>> converted dynamically between types. Having static typing doesn't mean
>> the compiler has to prove that the user will ever only input numbers
>> into the program. Indeed it can't: The user always inputs strings and
>> dynamic parsing and conversion will have to occur. Still this is
>> happening at one place in the program:
>> let input ()
>>     ...
>>     try
>>        let my_thingy = parse_user_input(field1 (* this is  a string *) )
>>        in           yaddayadda my_thingy (* this is of type thing *)
>>     with        Parse_error -> pop_up ("sorry this is not a valid
>> thingy specification"); input ()
>> ;;
>> See the point? Well defined types, error handling at conversion time,
>> no need to restrict the user input.
>>
> So we're back to square zero: it is the programmer that will have to
> write to a specific application the code above, right? It is the same
> we do in Smalltalk, Lisp, etc.

No, actually not. The tests necessary are restricted to type boundaries
(and only to partial, i.e. non-total, operations on those types) and
thus are vastly reduced.

> This will only pop at runtime also, like a dynamic language, so for
> the user very little would change, right?

You might laugh, but statically typed languages can't control what the
user puts in at the keyboard: So yes, nothing changes there. (Of
course in a statically typed language one would check only once,
immediately after receiving the string in the application shell when
the user put in e.g. his date of birth, and never later (one would
only handle the date type in the application proper), and one would
strive to implement total functions in the application core wherever
possible (a partial function being the source of run time errors
almost by necessity and/or imposing those pesky proof obligations at
the call site).)

Regards -- Markus

PS: It has been nice to discuss all that but a look at the clock
    convinces me that, as free as I am managing my own time, it
    has lasted long enough now. I don't think I will answer to
    much more misunderstandings of static typing (and what it's good
    for): There are good tutorials on the net about programming with
    types and its everyones one free will if he or she doesn't take
    advantage from them. But for those that don't, there is simply no
    right to be recognized as an equal opponent in discussions about
    the good and evil of static typing, simply because one doesn't
    discuss art with those not skilled in it: One teaches and that is
    quite a different relationship. So no offense -- but does it make
    sense to continue at all?
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <NOEsi.31981$Y4.1171735@phobos.telenet-ops.be>
Ingo Menger wrote:
> On 3 Aug., 03:09, Cesar Rabak <·······@yahoo.com.br> wrote:
>> Jon Harrop escreveu:
>> [snipped]
>>
>>> So when you say "you're going to write a test suite" you are assuming that
>>> the test suite would be the same for a dynamic or static program, which is
>>> not correct.
>> This looks like a non sense to me. Test should prove that business
>> requirements are met or no.
> 
> But then, you don't catch type errors at all, as has been claimed here
> earlier.
> 
> I can easily test that a function computes the annualized interest or
> whatever given an amount and an interest rate. But there is no
> guarantee, that this function will never be called with arguments of a
> wrong type. As Jon Harrop correctly pointed out, the type system does
> just that (among other things) and relieves you from caring about the
> cases you don't think about in your wildest horror dreams.
> 
> 
> 

Your function will be called from another function, right ?
I guess you'll test this other function too !

So if this other(s) function(s) is(are) correct, you may safely assume 
the first one won't be called with erroneous parameter types.

Sacha
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186151531.491559.286400@19g2000hsx.googlegroups.com>
On 3 Aug., 14:01, Sacha <····@address.spam> wrote:

> Your function will be called from another function, right ?
> I guess you'll test this other function too !

How do you do that, as a library writer, for instance?
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8virv$f5m$1@aioe.org>
Ingo Menger escreveu:
> On 3 Aug., 14:01, Sacha <····@address.spam> wrote:
> 
>> Your function will be called from another function, right ?
>> I guess you'll test this other function too !
> 
> How do you do that, as a library writer, for instance?
> 
> 
You put assertions in your library code, for example.
From: Stephen J. Bevan
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87k5scj893.fsf@dnsalias.com>
Cesar Rabak <·······@yahoo.com.br> writes:
> Ingo Menger escreveu:
>> On 3 Aug., 14:01, Sacha <····@address.spam> wrote:
>>
>>> Your function will be called from another function, right ?
>>> I guess you'll test this other function too !
>> How do you do that, as a library writer, for instance?
>>
> You put assertions in your library code, for example.

Sure, and if those assertions can be handled by a type theory then you
can write them in that form and have them checked before the program
runs so you can be sure the "assertion" will never trigger.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186199872.564523.242200@g12g2000prg.googlegroups.com>
> Sure, and if those assertions can be handled by a type theory then you
> can write them in that form and have them checked before the program
> runs so you can be sure the "assertion" will never trigger.

That's a big "if". Where is the type system that allows me to assert
something as simple as whether a date is constructed correctly (ie: 28
days in feb unless it's a leap year)? Where is the type system that
tells me when a sequence of operations leads to a singular matrix?
Where is the type system that keeps me from accidentally adding an
arithmetic op at the end of a basic block? Where is the type theory
that keeps me from setting a combination of sampling rate and sample
size that exceeds my sensor's bandwidth?

I feel like I'm being sold a bill of goods here. I see the utility of
guaranteeing invariants about the program, but where is this type
theory that lets me guarantee invariants in the problem domain that
aren't trivial?
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186226494.650486.263860@g4g2000hsf.googlegroups.com>
On 4 Aug., 05:57, Rayiner Hashem <·······@gmail.com> wrote:

> I feel like I'm being sold a bill of goods here. I see the utility of
> guaranteeing invariants about the program, but where is this type
> theory that let's me guarantee invariants in the problem domain that
> aren't trivial?

As you have been told before, it's not yet there. Period.
And, good news for you: For all times, you will be able find something
that can't yet be done automatically by the compiler.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186238498.331796.136800@z24g2000prh.googlegroups.com>
> As you have been told before, it's not yet there. Period.
> And, good news for you: For all times, you will be able find something
> that can't yet be done automatically by the compiler.

I'm not asking the compiler to prove everything automatically. I
didn't ask for a type system that could solve the halting problem
here, just for one that could enforce some simple, practical,
straightforward constraints that I picked from code I've written
recently. If you're admitting that the technology is not "not yet
there" to be able to enforce even these things, then what is the
premise of this debate? That the inability to automatically prove
trivial invariants about a program in Lisp is a tragic flaw in the
language? That I should give up the ability to do exploratory
development and incremental testing in order to be able to enforce
these, by your own admission, trivial constraints?
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <c0vebubuhc.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> As you have been told before, it's not yet there. Period.
>> And, good news for you: For all times, you will be able find something
>> that can't yet be done automatically by the compiler.
>
> I'm not asking the compiler to prove everything automatically. I
> didn't ask for a type system that could solve the halting problem
> here, just for one that could enforce some simple, practical,
> straightforward constraints that I picked from code I've written
> recently. 


let my_date = new_date ( year, month, day )               (* [1] *)
in 
let contract_end = my_date ~+ time_interval(Days 4)       (* [2] *)
in 
   ...
      

At [1]: Here a precondition of the conversion from int*int*int to date
is checked, so an exception might result if that precondition is
violated.

At [2]: This operation always succeeds, since time_interval and (~+)
are total (well, should be ...) and my_date is already a well formed
date. The result of the operation is a well formed date again. No
exception can occur.

Contrast that with the case that dates are represented as int*int*int
without type abstraction: You'd have to check at every operation that
takes a date (and also document that the precondition for every
operation).

> If you're admitting that the technology is not "not yet
> there" to be able to enforce even these things, then what is the
> premise of this debate? 

> That the inability to automatically prove
> trivial invariants about a program in Lisp is a tragic flaw in the
> language? That I should give up the ability to do exploratory
> development and incremental testing in order to be able to enforce
> these, by your own admission, trivial constraints?

In my book, you're welcome to use Lisp as much as you want. My
impression was, you wanted to know what other people get from static
type systems. We're not delusional, you know. 

And nobody really wants to force you to use other tools than you like
to. As far as I'm concerned you're welcome even to believe that static
type systems are harmful or useless. You know: I'm not in the business
to improve your toolset (neither concerning software nor
paradigms). At least not as long as it is for free :-).

Regards -- Markus
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186418907.521614.222680@b79g2000hse.googlegroups.com>
> let my_date = new_date ( year, month, day )               (* [1] *)
> in
> let contract_end = my_date ~+ time_interval(Days 4)       (* [2] *)
> in
>    ...
>
> At [1]: Here a precondition of the conversion from int*int*int to date
> is checked, so an exception might reult if that precondition is
> violated.
>
> At [2]: This Operation always succeeds, since time_iterval and (~+)
> are total (well, should be ...) and my_date is already a well formed
> date. the result of the operation is a well formed date again. No
> exception can occur.

Congratulations, you've done something I could've easily done in Lisp
with a defclass and a defmethod, maintaining exactly the same
invariants (ie: possible exception on construction, and given a closed
date-add method, no possibility of encountering malformed dates in any
other code). Of course, Lisp will let you actively subvert the system
if you want, but this is not something you can do by accident.

> Contrast that with the case that dates are represented as int*int*int
> without type abstraction: You'd have to check at every operation that
> takes a date (and also document that the precondition for every
> operation).

Why the hell would I represent a date as a simple tuple?
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xmyx4yp0x.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
> Congratulations, you've done something I could've easily done in Lisp
> with a defclass and a defmethod, maintaining exactly the same
> invariants (ie: possible execption on construction, and given a closed
> date-add method, no possibility of encountering malformed dates in any
> other code). 

From a static-types point of view that could be considered a reverse
Greenspun, using some icky runtime OO system to do what a suitably
typeful language can do at compile time.

> Of course, Lisp will let you actively subvert the system
> if you want, but this is not something you can do by accident.

I would go further and say such an OO system should have a setting
where it totally prevents subversion (you might turn this off for
debugging and development) in order to permit modular reasoning about
the program, relying on the encapsulation.  Java is the only widely
used OO language that I know of that seriously attempts this, but
maybe it can be done in CLOS.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186509729.345653.316190@o61g2000hsh.googlegroups.com>
> From a static-types point of view that could be considered a reverse
> Greenspun, using some icky runtime OO system to do what a suitably
> typeful language can do at compile time.

Greenspunning is when the programmer has to build necessary features
onto a language that doesn't have them. Dynamic type-checking is a basic
design element of dynamic languages, and hence cannot be considered
Greenspunning. In any case, you still need the type-checking machinery
in an ML, in a different form. At the implementation level, there are
a lot of similarities between the pattern-matching machinery and
attendant checks for incomplete patterns and OO dispatch and type-
checking systems.

As for "icky runtime OO" system, what kind of argument is that?

> I would go further and say such an OO system should have a setting
> where it totally prevents subversion

What exactly would this accomplish? It's not like you can subvert
encapsulation in Lisp by accident. You have to do it purposefully. And
if I as a programmer decide to do something purposefully, I don't want
some lowly compiler second-guessing me! The computer serves the human
being, not the other way around.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x4pjapmg5.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
> What exactly would this accomplish? It's not like you can subvert
> encapsulation in Lisp by accident. You have to do it purposefully. And
> if I as a programmer decide to do something purposefully, I don't want
> some lowly compiler second-guessing me! The comupter serves the human
> being, not the other way around.

Well, the human wants a definite answer to whether the encapsulation
has been subverted.  If the computer can't supply that answer, then
it's not serving the human being very well.  And that means without
studying millions of lines of code, that might even be evalling
something at runtime that does the subversion.  I guess it's enough if
the compiler has a "--forbid-subversion" command line option that
causes attempted subversion to signal an error, and then the absence
of an error happening means there was no subversion.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <9xlkcn4eex.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> let my_date = new_date ( year, month, day )               (* [1] *)
>> in
>> let contract_end = my_date ~+ time_interval(Days 4)       (* [2] *)
>> in
>>    ...
>>
>> At [1]: Here a precondition of the conversion from int*int*int to date
>> is checked, so an exception might reult if that precondition is
>> violated.
>>
>> At [2]: This Operation always succeeds, since time_iterval and (~+)
>> are total (well, should be ...) and my_date is already a well formed
>> date. the result of the operation is a well formed date again. No
>> exception can occur.
>
> Congratulations, you've done something I could've easily done in Lisp
> with a defclass and a defmethod, maintaining exactly the same
> invariants (ie: possible execption on construction, and given a closed
> date-add method, no possibility of encountering malformed dates in any
> other code). Of course, Lisp will let you actively subvert the system
> if you want, but this is not something you can do by accident.

If that approach actually enforces that a given identifier occurrence
in the source is bound to only a specific type: Congratulations:
You're already effectively using static types. So why resist the use
of static typing as vocally as you do?


Regards -- M
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bhhe795ti240a@corp.supernews.com>
Rayiner Hashem wrote:
> If you're admitting that the technology is not "not yet
> there" to be able to enforce even these things, then what is the
> premise of this debate?

Static type systems automate useful proofs at compile time, improving
reliability.

> That I should give up the ability to do exploratory
> development and incremental testing in order to be able to enforce
> these, by your own admission, trivial constraints?

A static type system will help your exploratory development by pointing you
to invalidated code when you make changes, letting you evolve correct
programs. Static typing has no effect on incremental testing.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Chris F Clark
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <sddmyx7tis4.fsf@shell01.TheWorld.com>
> Where is the type system that keeps me from accidentally adding an
> arithmetic op at the end of a basic block?

I don't know about your other cases, but this one is achievable today.
You have a distinct type for blocks that end in a jump from those that
don't.  Your code then doesn't allow adding arithmetic (or other
prohibited ops) to that type of block.  It's simple; it works; and it
does prevent errors.

I suspect the other problems you suggested have similar properties.  I
just haven't worked on those problems yet, so I don't have the answer
at hand.  Most of your problems look like cases, where you have two
conditions, and you want to make certain if you get one of the cases,
you can't get the other.  That's usually the case of having 3 types
that represent a 2 bit value, neither bit set, one bit set, the other
bit set, and the prohibited case both bits set is not allowed.  It's
easy to build types that look like that.  It doesn't even require a
lot of thought--well until your list of conditions involves more than 4
to 6 bits, in which case, one wants to use a tool to help one assure
that one has all the cases covered.  

Note that this is an additional tool on top of static typing that
helps one build the right set of types, because one has started down
a road where one needs exhaustive testing and coverage, something that
humans are consistently bad at.  So, this isn't a case where one wants
to give up static typing, but instead one wants something else in
addition to static typing.

As your problem gets more complex, one needs more tools to help one
constrain oneself from making more subtle errors, not less.  People
didn't invent these tools for fun.  They invented them because they
realized that they needed to extend their reach into areas where they
were mistake prone.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186242956.944345.275320@e9g2000prf.googlegroups.com>
> I don't know about your other cases, but this one is achievable today.
> You have a distinct type for blocks that end in a jump from those that
> don't.  Your code then doesn't allow adding arithmetic (or other
> prohibited ops) to that type of block.  It's simple; it works; and it
> does prevent errors.

That's not what I was trying to express. All blocks end in a jump (or
a return). What I want is a type:

internal-instruction = add-instruction | sub-instruction | mul-
instruction ...
terminator-instruction = jump-instruction | return-instruction ...
instruction = internal-instruction | terminator-instruction
basic-block = vector of instruction where last-element is subtype of
terminator-instruction and
                                          every-other-element is not
subtype of terminator-instruction

Note that any solution must keep the terminators in with the
other instructions (and keep terminator-instruction as a subtype of
instruction), because terminators are otherwise treated as regular
instructions (ie: they have input operands that affect liveness, etc).

This is a really really simple problem btw.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <08hcnebtzf.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> I don't know about your other cases, but this one is achievable today.
>> You have a distinct type for blocks that end in a jump from those that
>> don't.  Your code then doesn't allow adding arithmetic (or other
>> prohibited ops) to that type of block.  It's simple; it works; and it
>> does prevent errors.
>
> That's not what I was trying to express. All blocks end in a jump (or
> a return). What I want is a type:
>
> internal-instruction = add-instruction | sub-instruction | mul-
> instruction ...
> terminator-instruction = jump-instruction | return-instruction ...
> instruction = internal-instruction | terminator-instruction
> basic-block = vector of instruction where last-element is subtype of
> terminator-instruction and
>                                           every-other-element is not
> subtype of terminator-instruction
>
> Note that any solution must maintain keep the terminators in with the
> other instructions (and keep terminator-instruction as a subtype of
> instruction), because terminators are otherwise treated as regular
> instructions (ie: they have input operands that affect liveness, etc).
>
> This is a really really simple problem btw.

And how is that solved in dynamically typed language?

BTW: Have a look to the Haskell IO Monad. I've the impression it does
things very similar to what you want here.

Regards -- Markus
From: ········@ps.uni-sb.de
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186389890.104224.253750@19g2000hsx.googlegroups.com>
On 4 Aug., 17:55, Rayiner Hashem <·······@gmail.com> wrote:
>
> That's not what I was trying to express. All blocks end in a jump (or
> a return). What I want is a type:
>
> internal-instruction = add-instruction | sub-instruction | mul-
> instruction ...
> terminator-instruction = jump-instruction | return-instruction ...
> instruction = internal-instruction | terminator-instruction
> basic-block = vector of instruction where last-element is subtype of
> terminator-instruction and
>                                           every-other-element is not
> subtype of terminator-instruction
>
> Note that any solution must maintain keep the terminators in with the
> other instructions (and keep terminator-instruction as a subtype of
> instruction), because terminators are otherwise treated as regular
> instructions (ie: they have input operands that affect liveness, etc).

You can map that almost verbatim into a set of datatypes:

  type internal = Add of ... | Sub of ... | Mul of ... | ...
  type terminal = Jump of ... | Return of ...
  type 'a instruction = 'a * attributes
  type basic_block = Cons of internal instruction * basic_block
                   | Term of terminal instruction

> This is a really really simple problem btw.

Indeed.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186424052.642175.299260@19g2000hsx.googlegroups.com>
> You can map that almost verbatim into a set of datatypes:

An "almost" solution is no solution at all.

>
>   type internal = Add of ... | Sub of ... | Mul of ... | ...
>   type terminal = Jump of ... | Return of ...
>   type 'a instruction = 'a * attributes
>   type basic_block = Cons of internal instruction * basic_block
>                    | Term of terminal instruction

This is an incorrect solution. As the question is posed, the
instruction vector should be, well, a vector. For reasons of cache
friendliness, algorithmic complexity, etc.
From: ········@ps.uni-sb.de
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186434188.238824.177570@22g2000hsm.googlegroups.com>
On 6 Aug., 20:14, Rayiner Hashem <·······@gmail.com> wrote:
>
> >   type internal = Add of ... | Sub of ... | Mul of ... | ...
> >   type terminal = Jump of ... | Return of ...
> >   type 'a instruction = 'a * attributes
> >   type basic_block = Cons of internal instruction * basic_block
> >                    | Term of terminal instruction
>
> This is an incorrect solution.

Only by wilful wide stretch of the meaning of the word "incorrect".

> As the question is posed, the
> instruction vector should be, well, a vector. For reasons of cache
> friendliness, algorithmic complexity, etc.

I don't buy this. Cache friendliness is a straw man, because your
vector will consist of indirections anyway. And with respect to
algorithms I doubt that a vector is an advantage here (IME with code
like this you need cheap splitting and joining, rather than random
access).

But if you absolutely think that you cannot afford a list, you can
still fall back to type abstraction to enforce the invariant on a
vector.

Anyway, I fail to see how /not/ having a type system makes the
situation any better. With one, you at least have a choice.
From: Fred Gilham
Subject: Garnet
Date: 
Message-ID: <u77io8wdf8.fsf_-_@snapdragon.csl.sri.com>
In the process of testing a bunch of stuff for CMUCL, I worked on
Garnet a bit recently.

Is anyone still interested in Garnet?

I noticed on Cliki that someone named Stelios Kokkalis was talking
about releasing a new version of Garnet with bug fixes and new
features.  However, I don't see anything on this yet.

Garnet has its advantages, though I've found a number of bugs in it
this go-round (some of which I've fixed).

-- 
Fred Gilham                                  ······@csl.sri.com
Behold, how good and pleasant it  is when brothers dwell in unity.  It
is like the  precious oil upon the head, running  down upon the beard,
upon the beard of Aaron, running  down on the collar of his robes.  It
is like the dew of Hermon,  which falls on the mountains of Zion.  For
there the LORD has commanded the blessing, life for evermore.  -Ps 133
From: Ken Tilton
Subject: Re: Garnet
Date: 
Message-ID: <MxPti.108$75.91@newsfe12.lga>
Fred Gilham wrote:
> In the process of testing a bunch of stuff for CMUCL, I worked on
> Garnet a bit recently.
> 
> Is anyone still interested in Garnet?
> 
> I noticed on Cliki that someone named Stelios Kokkalis was talking
> about releasing a new version of Garnet with bug fixes and new
> features.  However, I don't see anything on this yet.
> 
> Garnet has its advantages...

Can any of them be distilled out and salvaged as standalone libraries? I 
believe the non-CLOS thing is a show stopper, and Cells has replicated 
many of the charms of KR.

kt

-- 
http://www.theoryyalgebra.com/

"Algebra is the metaphysics of arithmetic." - John Ray

"As long as algebra is taught in school,
there will be prayer in school." - Cokie Roberts

"Stand firm in your refusal to remain conscious during algebra."
    - Fran Lebowitz

"I'm an algebra liar. I figure two good lies make a positive."
    - Tim Allen
From: Fred Gilham
Subject: Re: Garnet
Date: 
Message-ID: <u7abt4ktvc.fsf@snapdragon.csl.sri.com>
Ken Tilton <···········@optonline.net> writes:

> Fred Gilham wrote:
>> In the process of testing a bunch of stuff for CMUCL, I worked on
>> Garnet a bit recently.
>>
>> Is anyone still interested in Garnet?
>>
>> I noticed on Cliki that someone named Stelios Kokkalis was talking
>> about releasing a new version of Garnet with bug fixes and new
>> features.  However, I don't see anything on this yet.
>>
>> Garnet has its advantages...
>
> Can any of them be distilled out and salvaged as standalone libraries?
> I believe the non-CLOS thing is a show stopper, and Cells has
> replicated many of the charms of KR.
>
> kt
>

What is the one thing Garnet has that Cells will never have????

Documentation!!! ;-)


To answer your question, though, I think it's hard to imagine doing
what you're asking, because it's all built on KR.

I suppose if someone wrote a KR emulation in CLOS, it might work, but
that would be beyond the capabilities of mere mortals like me.

-- 
Fred Gilham                                  ······@csl.sri.com
It is the nature of love to bind itself, and the institution of
marriage merely paid the average man the compliment of taking him at
his word.                           -- G. K. Chesterton
From: Madhu
Subject: Re: Garnet
Date: 
Message-ID: <m3k5s6kohx.fsf@robolove.meer.net>
* Fred Gilham <·················@snapdragon.csl.sri.com> :
| In the process of testing a bunch of stuff for CMUCL, I worked on
| Garnet a bit recently.
|
| Is anyone still interested in Garnet?

I'd like to get a copy with the fixes if possible.  I'm trying to write
a GUI app for my dad at the moment and think I need a constraints
engine.  I am finding myself inadequate to the task given the tools at
hand.  The app will likely need to run on windows: capi or ltk but
perhaps I can get some ideas on the widgets i need to come up with from
prototyping under cmu/clx.

(Julian Stecklina made a standalone release of kr a few years back. I
just got a copy of the garnet docs from your ftp site and am trying to
get started with that :)
--
Regards
Madhu
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186436534.692843.296080@w3g2000hsg.googlegroups.com>
> Only by wilful wide stretch of the meaning of the word "incorrect".

It's incorrect with respect to the specification I presented of the
solution I wanted. What part of the "vector of"... was unclear?

> I don't buy this. Cache friendliness is a straw man, because your
> vector will consist of indirections anyway.

The spine of the vector will cache well, so you only risk a cache miss
when you access each instruction. You'll risk this same cache miss in
the list arrangement, plus another one for each list node you access.
Inlining of fields in the compiler could change things, but that's a
separate issue.

> And with respect to
> algorithms I doubt that a vector is an advantage here (IME with code
> like this you need cheap splitting and joining, rather than random
> access).

You walk the instructions way more often than you insert or remove
them. Using the vector could reduce the number of cache misses during
a walk by half. Moreover, basic blocks are usually quite small, so
even though the insert/remove is O(N) instead of O(1), the smaller
constant factor means it's not much if at all slower.

> But if you absolutely think that you cannot afford a list, you can
> still fall back to type abstraction to enforce the invariant on a
> vector.

It's not a matter of being able to afford it or not. I have a
particular solution I want to express. I have a good reason for why I
chose that particular solution. I want a tool that shuts up and lets
me do what I want, not one that forces me to express things in a
particular way to make it happy.

> Anyway, I fail to see how /not/ having a type system makes the
> situation any better. With one, you at least have a choice.

You're missing the point. I'm not saying that not having a static type
system would make these examples any better, what I'm saying is "these
are the examples that are important to me, what does static typing buy
me for them?" If the answer is "well, not that much", it's legitimate
to wonder why I should give up the advantages of dynamic typing for
such little gain.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <xtwsw86ylc.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> Only by wilful wide stretch of the meaning of the word "incorrect".
>
> It's incorrect with respect to the specification I presented of the
> solution I wanted. What part of the "vector of"... was unclear?

Most probably the vector part: What is a vector in your opinion?

-- M
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <w7sl6w6yfn.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> But if you absolutely think that you cannot afford a list, you can
>> still fall back to type abstraction to enforce the invariant on a
>> vector.
>
> It's not a matter of being able to afford it or not. I have a
> particular solution I want to express. I have a good reason for why I
> chose that particular solution. I want a tool that shuts up and let's
> me do what I want, not one that forces me to express things in a
> particular way to make it happy.

Ah, I understand now: Next time you do something with directed graphs
in it, you'll need a programming language with "graph" as a native
data type and the time after that, when you work on a problem of
quantum mechanics, you'll need a language with "hermitian operators" or
"observables" in it, perhaps even a "measurement". 

I wish you much fun the next time you select a language.

Regards -- markus
From: ········@ps.uni-sb.de
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186476385.833732.11740@q75g2000hsh.googlegroups.com>
On 6 Aug., 23:42, Rayiner Hashem <·······@gmail.com> wrote:
>
> It's incorrect with respect to the specification I presented of the
> solution I wanted. What part of the "vector of"... was unclear?

A specification is abstract. As far as I can tell, there are many
valid implementations of the abstract concept of "vector". A list is
one of them (with specific performance trade-offs).

> > I don't buy this. Cache friendliness is a straw man, because your
> > vector will consist of indirections anyway.
>
> The spine of the vector will cache well, so you only risk a cache miss
> when you access each instruction. You'll risk this same cache miss in
> the list arrangement, plus another one for each list node you access.

This is getting absurd, but: the spine of the list is likely to be
compact and will thus cache well, too. Moreover, the items very likely
contain many more indirections to follow during traversal, which will
dominate the cache cost - if it is relevant anyway.

Really, if you consider caching on this level, then this sounds like a
serious case of premature optimisation.

> I want a tool that shuts up and let's
> me do what I want, not one that forces me to express things in a
> particular way to make it happy.

You don't want a tool, you want magic. Tools always require you to
play with them, not against them.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186512986.956402.199220@57g2000hsv.googlegroups.com>
> A specification is abstract. As far as I can tell, there are many
> valid implementations of the abstract concept of "vector". A list is
> one of them (with specific performance trade-offs)

I have no interest in playing with terminology. I'm using "vector" the
way Lisp, C++, C#, Java, and Ada, and nearly all users of those
languages understand the term.

> This is getting absurd, but: the spine of the list is likely to be
> compact and will thus cache well, too.

It's not just a matter of fitting into the cache, but using the cache
and prefetching machinery effectively. Vectors interact much better
with these systems than lists do. Pointer-chasing is really one of the
slowest things you can do on a modern machine.

> Really, if you consider caching on this level, then this sounds like a
> serious case of premature optimisation.

Choosing proper data structures at the outset is not premature
optimization.

> You don't want a tool, you want magic. Tools always require you to
> play with them, not against them.

Wanting to express something as a vector is hardly "magic". It's
CS-101 stuff. If a static type system can't express those sorts of
basic constraints, what use is it to me? What does it buy me over Lisp?
From: ········@ps.uni-sb.de
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186524491.246966.33830@22g2000hsm.googlegroups.com>
On 7 Aug., 20:56, Rayiner Hashem <·······@gmail.com> wrote:
> > You don't want a tool, you want magic. Tools always require you to
> > play with them, not against them.
>
> Wanting to express something as a vector is hardly "magic".

Nobody said it is. Demanding a type system that can second-guess
whatever choice of data structure you make is (which you did in what I
replied to).

> If a static type system can't express those sorts of
> basic constraints, what use is it to me?

My car cannot drive upstairs. If it can't even reach such nearby
places, what use is it to me?

More seriously, every tool has its limits. And you always have to
learn first how to use it to your advantage.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bhjfd1ctu6812@corp.supernews.com>
Rayiner Hashem wrote:
> Wanting to express something as a vector is hardly "magic". It's
> CS-101 stuff. If a static type system can't express those sorts of
> basic constraints, what use is it to me?

Static type systems can express those constraints:

  type internal = Add | Sub | Mul
  type terminator = Jmp | Ret
  type block = internal array * terminator

That can be expressed in any modern functional programming language.

> What does it buy me over Lisp? 

Reliability, exploratory programming, inference, throwback, performance,
machine-verified documentation...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: ········@ps.uni-sb.de
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186524975.868645.170750@19g2000hsx.googlegroups.com>
On 7 Aug., 21:45, Jon Harrop <····@ffconsultancy.com> wrote:
>
> Static type systems can express those constraints:
>
>   type internal = Add | Sub | Mul
>   type terminator = Jmp | Ret
>   type block = internal array * terminator

I refrained from suggesting that, because I suppose that Rayiner
demands to be able to index the terminator. What he disregards,
though, is that the structure he describes isn't a homogeneous vector
and that it thus is a suboptimal choice to represent it as such.

Typeful programming is all about understanding the inner logical
structure of things and molding it into types as far as possible.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186530824.192979.38760@o61g2000hsh.googlegroups.com>
> I refrained from suggesting that, because I suppose that Rayiner
> demands to be able to index the terminator. What he disregards,
> though, is that the structure he describes isn't a homogeneous vector
> and that it thus is a suboptimal choice to represent it as such.
>
> Typeful programming is all about understanding the inner logical
> structure of things and molding it into types as far as possible.

In the "logical inner structure" of a basic block, the terminator is
most definitely part of the vector. Very little code needs to treat
the terminator specially --- not being able to index the terminator
would indeed be a major inconvenience.
From: ········@ps.uni-sb.de
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186603750.841654.77450@w3g2000hsg.googlegroups.com>
Rayiner Hashem wrote:
>
> In the "logical inner structure" of a basic block, the terminator is
> most definitely part of the vector.

Even though the necessity of using ad-hoc descriptions in your pseudo-
grammar already shows a different picture?

> Very little code needs to treat
> the terminator specially --- not being able to index the terminator
> would indeed be a major inconvenience.

Well, treating the terminator differently certainly isn't the same as
accessing it separately in the surrounding block. In fact, I have
written code like that, and I cannot confirm the "major inconvenience"
you allege at all.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186621039.171904.24640@57g2000hsv.googlegroups.com>
> Well, treating the terminator differently certainly isn't the same as
> accessing it seperately in the surrounding block. In fact, I have
> written code like that, and I cannot confirm the "major inconvenience"
> you allege at all.

Every place where I would simply iterate over the instructions in the
block, I now have to iterate over the instruction vector, then
explicitly handle the terminator. Yes, this could be wrapped, but
there are a lot of cases to wrap, and it gets ugly.

I should note that I looked up how MLRISC does their basic blocks, and
they treat the instruction vector as a heterogeneous sequence as well.
In fact, their solution of treating INSTRUCTION as an abstract data
type, then pattern-matching when they need to distinguish them in
certain places doesn't seem to differ much in principle from how I do
things.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bmk3recig33e9@corp.supernews.com>
Rayiner Hashem wrote:
>> Well, treating the terminator differently certainly isn't the same as
>> accessing it seperately in the surrounding block. In fact, I have
>> written code like that, and I cannot confirm the "major inconvenience"
>> you allege at all.
> 
> Every place where I would simply iterate over the instructions in the
> block, I know have to iterate over the instruction vector, then
> explicitly handle the terminator. Yes, this could be wrapped, but
> there are a lot of cases to wrap, and it gets ugly.

In F#, you just derive from Seq and implement get and set. That is three
lines of code: hardly a "lot of cases" or "ugly"...

Similarly for all of the other languages, except they do not provide an
equivalent to Seq in their standard libraries.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186687734.189070.91380@d30g2000prg.googlegroups.com>
> Similarly for all of the other languages, except they do not provide an
> equivalent to Seq in their standard libraries.

Which means that if I do actually go down that route, I have to
reimplement map, reduce, etc...
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bmuo3jfq476a1@corp.supernews.com>
Rayiner Hashem wrote:
>> Similarly for all of the other languages, except they do not provide an
>> equivalent to Seq in their standard libraries.
> 
> Which means that if I do actually go down that route, I have to
> reimplement map, reduce, etc...

  let map f (a, e) = Array.map f a, f e
  let reduce f (a, e) = Array.fold_right f a e

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bkabg30utsld5@corp.supernews.com>
Rayiner Hashem wrote:
> In the "logical inner structure" of a basic block, the terminator is
> most definitely part of the vector. Very little code needs to treat
> the terminator specially --- not being able to index the terminator
> would indeed be a major inconvenience.

Sounds like you don't know what the presented solutions do...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186526267.586213.226330@w3g2000hsg.googlegroups.com>
> Static type systems can express those constraints:
>
>   type internal = Add | Sub | Mul
>   type terminator = Jmp | Ret
>   type block = internal array * terminator

Neat. Now I have two incorrect solutions to this particular problem...
From: Peter Ilberg
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <Pine.BSO.4.64.0708081139410.9921@dalek.dy.natinst.com>
(Apologies if you receive multiple copies of this post. Neither static nor
dynamic typing seem powerful enough to prevent my level of incompetence.)

On Tue, 7 Aug 2007, Rayiner Hashem wrote:
> > Static type systems can express those constraints:
> > 
> >   type internal = Add | Sub | Mul
> >   type terminator = Jmp | Ret
> >   type block = internal array * terminator
> 
> Neat. Now I have two incorrect solutions to this particular problem...

This is an interesting problem. I don't think you can encode your constraints

   	1 - last instruction in a block is a terminator
   	2 - all instructions in a block are stored in a vector
   		(or some other sequence type for that matter)

in the data type definitions directly.

But I think you might be able to encode these constraints in the types of
the functions that construct blocks. If you can only construct blocks
using these functions and the functions statically prevent you from
constructing invalid blocks, then it doesn't matter if the block data 
types also enforce the same constraints.

So here's my attempt at a solution to your challenge (in Haskell).
Please let me know if I've missed anything and what it is that I've 
missed, because I really want to find out if I can use the static type 
system to fullfill your requirements. (I'm learning Haskell and looking 
for motivating examples (for me) to better understand some of the 
advanced features.)

Thanks for the interesting challenge,

-- Peter

----------------------------------------------------------------------

{-# LANGUAGE EmptyDataDecls #-} -- value-less declarations for the phantom types below
module Instructions where

import Data.Array (Array, listArray)

-- | Non-terminating instructions that may appear anywhere in a block.
data Internal = Add | Sub | Mul
           deriving (Show) -- so we can display them in the repl

-- | Control-transfer instructions that may only end a block.
data Terminator = Jmp | Ret
           deriving (Show)

-- | A single instruction: either an internal op or a terminator.
data Instruction = I Internal | T Terminator
           deriving (Show)

-- phantom types i.e. types without values, used only as state tags
data Open       -- an open block can be extended with more instructions
data Closed     -- a closed block ends in a terminator and is frozen

-- | A block is a list of instructions, tagged with its open/closed state.
-- (using list instead of vector for convenience in construction)
-- (see 'toVector' below for how to convert this to a vector)
-- NOTE: 'append'/'finish' use @is ++ [x]@, which is O(n) per append;
-- fine for construction of small basic blocks.
data Block a = Blk [Instruction]
           deriving (Show)

-- | Empty blocks can be extended by default.
emptyBlock :: Block Open
emptyBlock = Blk []

-- | Append a normal instruction to a still-open block.
append :: Internal -> Block Open -> Block Open
append i (Blk is) = Blk (is ++ [I i])

-- | Finish a block with a terminator; the result can no longer be extended.
finish :: Terminator -> Block Open -> Block Closed
finish i (Blk is) = Blk (is ++ [T i])

-- | Example: a well-formed block @[I Mul, I Add, T Jmp]@.
instructions :: Block Closed
instructions = finish Jmp (append Add (append Mul emptyBlock))

-- type checker complains about this one
-- instructions = append Add (finish Jmp (append Mul emptyBlock))

-- | Convert a block's instruction list to a 1-indexed vector.
-- (only works for "finished" blocks, enforced by the 'Closed' tag)
toVector :: Block Closed -> Array Int Instruction
toVector (Blk is) = listArray (1, length is) is
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bkaa0mll5pgd2@corp.supernews.com>
Rayiner Hashem wrote:
>> Static type systems can express those constraints:
>>
>>   type internal = Add | Sub | Mul
>>   type terminator = Jmp | Ret
>>   type block = internal array * terminator
> 
> Neat. Now I have two incorrect solutions to this particular problem...

Care to elaborate?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <qylkcqbu1k.fsf@hod.lan.m-e-leypold.de>
Chris F Clark wrote:

>> Where is the type system that keeps me from accidentally adding an
>> arithmetic op at the end of a basic block?
>
> I don't know about your other cases, but this one is achievable today.
> You have a distinct type for blocks that end in a jump from those that
> don't.  Your code then doesn't allow adding arithmetic (or other
> prohibited ops) to that type of block.  It's simple; it works; and it
> does prevent errors.

Yep. Basically that's the trick the Haskell IO Monad uses: Enforce a
composition syntax by typing the composed elements appropriately.

Regards -- Markus
From: Stephen J. Bevan
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87bqdnjrtj.fsf@dnsalias.com>
Rayiner Hashem <·······@gmail.com> writes:
>> Sure, and if those assertions can be handled by a type theory then you
>> can write them in that form and have them checked before the program
>> runs so you can be sure the "assertion" will never trigger.
[snip]
> I feel like I'm being sold a bill of goods here. I see the utility of
> guaranteeing invariants about the program, but where is this type
> theory that let's me guarantee invariants in the problem domain that
> aren't trivial?

Caveat emptor.  I'm not selling you a type theory that can handle all
invariants.  I'm noting that *if* one is willing to write invariants[1]
*and* those invariants can be handled by a type theory then you get
the check at compile time rather than runtime[2].  For all those
invariants that can't be handled by the type theory, they stay as
runtime assertions.

---------------------
[1] I consider that a big "if" -- how many people systematically
    include invariants in every function/method they write?

[2] It may also be that the compiler will infer some of those
    invariants thereby saving you the effort of writing them.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ukr6mibu5e.fsf@hod.lan.m-e-leypold.de>
'stephen AT dino DOT dnsalias DOT com (Stephen J DOT Bevan)' wrote:

> Rayiner Hashem <·······@gmail.com> writes:
>>> Sure, and if those assertions can be handled by a type theory then you
>>> can write them in that form and have them checked before the program
>>> runs so you can be sure the "assertion" will never trigger.
> [snip]
>> I feel like I'm being sold a bill of goods here. I see the utility of
>> guaranteeing invariants about the program, but where is this type
>> theory that let's me guarantee invariants in the problem domain that
>> aren't trivial?
>
> Caveat emptor.  I'm not selling you a type theory that can handle all
> invariants.  I'm noting that *if* one is willing to write invariants[1]
> *and* those invariants can be handled by a type theory then you get
> the check at compile time rather than runtime[2].  

Just a tiny amendment:

Essentially all invariants that can be tested can be handled by a type
theory. Dynamically at conversion time. The type just serves as an
assurance that that the test has already been made, so doesn't need to
be repeated all the time if the operation in question stays in the
type domain.

It's, BTW, discussions like this, where type systems and proofs on
contracts are completely confused with each other, that convince me
that our proponents either have a large blind spot regarding an
important subject area of computer science (and I'd have expected any
serious software developer these days has at least theoretical
knowledge of how type systems work and how they are used, even if he uses
a dynamic language for some reasons) or are complete newbies.


Regards -- Markus




> For all those
> invariants that can't be handled by the type theory, they stay as
> runtime assertions.
>
> ---------------------
> [1] I consider that a big "if" -- how many people systematically
>     include invariants in every function/method they write?
>
> [2] It may also be that the compiler will infer some of those
>     invariants thereby saving you the effort of writing them.
From: Nicolas Neuss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87lkcr3ul3.fsf@ma-patru.mathematik.uni-karlsruhe.de>
Rayiner Hashem <·······@gmail.com> writes:

> That's a big "if". Where is the type system that allows me to assert
> something as simple as whether a date is constructed correctly (ie: 28
> days in feb unless it's a leap year)?

In Common Lisp:-)  Something like (satisfies date-p)...

Nicolas
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <nqd4y2btvi.fsf@hod.lan.m-e-leypold.de>
Nicolas Neuss wrote:

> Rayiner Hashem <·······@gmail.com> writes:
>
>> That's a big "if". Where is the type system that allows me to assert
>> something as simple as whether a date is constructed correctly (ie: 28
>> days in feb unless it's a leap year)?
>
> In Common Lisp:-)  Something like (satisfies date-p)...

And it would be exactly the same in a statically typed language with
the difference that testing and value construction would be one
operation and the type would serve as an assurance that the test has
been already made, so need not repeated. See the example I gave
Rayiner in another reply.

Regards .. Markus
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <yr8x8qd9m4.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> Sure, and if those assertions can be handled by a type theory then you
>> can write them in that form and have them checked before the program
>> runs so you can be sure the "assertion" will never trigger.

> That's a big "if". 

No, it's the most common case. "This function handles integer numbers,
no strings, not lists, etc. It will throw an exception when being
passed something not an Integer". This and similar assertions are so
common in interface contracts that most people don't realize at all
it's a precondition. In a statically typed language this is already
contained in the type system.

> Where is the type system that allows me to assert
> something as simple as whether a date is constructed correctly (ie: 28
> days in feb unless it's a leap year)? 

We already had that (the prime example): Types are used to mark data
flow for which checks (during data construction) already have been
done. This is one thing. Second: Type systems don't usually cover all
preconditions (like: This number K must divisible through this other
number N). And this is not the intention: Type system cover the most
common pre conditions by restricting the values passed to already well
defined sets (and thus also telling the compiler which operations are
permissible and which should already be flagged as errors during
compilation). Mind: Even if you can't get all, what you get is still
worth it.

> Where is the type system that
> tells me when a sequence of operations leads to a singular matrix?

So you don't have it in a dynamic type system. That's no problem
for you. You don't get it in a static type system and then you
complain? What kind of attitude is this?


> Where is the type theory that keeps me from setting a combination of
> sampling rate and sample size that exceeds my sensor's bandwidth?

Actually there is a lot that can be done in this direction.

> I feel like I'm being sold a bill of goods here. 

> I see the utility of guaranteeing invariants about the program, but
> where is this type theory that let's me guarantee invariants in the
> problem domain that aren't trivial?

I think you misunderstand a lot of things here. Again: During value
construction the invariants are checked. Necessarily dynamically if
the values a constructed from other types. Then when operating on the
types the invariants are conserved if the operations are properly
designed. The purpose of the type system is basically to restrict
exposure of the representation of a type in the module which is the
only place where one has to care for consistency (prove that one
conserves the invariants). From the outside it's impossible then to
violate the invariants.

Static types basically act as markers to mark the data flow (which kind of
data reaches which part of the program). Thus they act as a central
corner stone around which contracts (as in design by contract) can be
formulated.

My impression is, you mix up type systems and systems for program
verification (and that you're not alone in this respect). While there
is a relationship, generally these a different things.

Regards -- Markus
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186422725.306365.121040@22g2000hsm.googlegroups.com>
> No, it's the most common case.
I'm done with unsubstantiated claims. Provide some evidence of this
claim, or drop it.

> We already had that (the prime example): Types are used to mark data
> flow for which checks (during data construction) already have been
> done.

I understand that, and types are used in dynamically-typed languages
for the exact same purposes. It's hard to explain to someone who
doesn't believe that dynamically-typed languages have types at all,
but structure and class abstractions are used in Lisp the same way
type abstractions are in statically-typed languages. Given correct
constructors and closed manipulation functions, the increased
probability of runtime error in dynamically-typed languages stems from
the fact that type flows in dynamically-typed languages are verified
at run-time, while type-flows in statically-typed languages are
verified at compile-time.

Thus, differences in reliability stem from the extent to which testing
can verify that incorrect types will not appear along some data-flow
path in the program. Testing cannot prove that incorrect types will
not appear along some path in some program, but it can make that assertion
with an arbitrarily high degree of confidence.

Dynamic-typing proponents have pointed out that they, in practice, do
not often encounter type errors that escape their testing frameworks.
This claim should not be dismissed off-hand, especially because there
is little empirical evidence to suggest that large systems written in
Lisp are less reliable than their counterparts written in ML or
Haskell!

There is a possible theoretical justification for this observation.
Data collected in the context of type inference* research in dynamic
languages suggests that the vast majority of data flow paths in
programs, even in highly dynamic languages like Smalltalk and Self,
are monomorphic. This suggests that if an incorrect data type appears
along some data flow path in the program, it will do so, with very
high probability, in all executions that exercise that path. That
would explain why Lisp programmers observe very few type errors
escaping their test frameworks.

> So you don't have it in a dynamical type system. That's now problem
> for you. You don't get it in a static type system and then you
> complain? What kind of attitude is this?

The dynamic type system doesn't claim to make far-reaching assurances
of static verifiability! Surely, upon being marketed a new widget, it
is reasonable for someone to ask "does this really do all that much
more than my existing widget, and are those features worth the cost?"
It's completely reasonable for the prospective buyer to then say
"well, I'd find this new widget useful if it did X, which my existing
widget doesn't do, but since it can't, what's the point?"
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xir7synk6.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
> Given correct
> constructors and closed manipulation functions, the increased
> probability of runtime error in dynamically-typed languages stems from
> the fact that type flows in dynamically-typed languages are verified
> at run-time, while type-flows in statically-typed languages are
> verified at compile-time.

Actually every serious Lisp compiler I know of for general purpose
computers has a way to turn off the runtime checks for speed, and
programs running with the checks turned off are not verified at all.
The Lisp code at that point is no different than C code (argggh!!!).

> Dynamic-typing proponents have pointed out that they, in practice, do
> not often encounter type errors that escape their testing frameworks.

Right, but that only shows that the test cases don't uncover any
errors.  It doesn't show that there are no errors.

I take the view that I've been in this field long enough to think that
while I'm hopefully not a total moron in the overall scheme of things,
there are many other folks out there who are a heck of a lot smarter
than I am.  The same is true for everyone in this discussion, unless
you have a Nobel prize or Fields medal that you haven't told us
about.

Therefore, just because I can't think up suitably devious tests to
uncover any type errors I might have made, it doesn't mean there's no
diabolical genius out there who can find a subtle error and use it
maliciously against my program, especially since I had to develop and
test the program on a budget and to a schedule, while the bad guy can
spend years and millions finding the error.  Therefore, IMO, "this
function returns an integer for every test input that I happened to
think of" is a much less valuable proposition than "this function
returns an integer for every input as shown by static analysis".  And
we are all in the same boat.

Did you ever read "A Fire Upon The Deep"?  It's a fantastically
excellent SF novel; I don't do it the slightest bit of justice by
describing it as being about a malicious computer virus with godlike
intelligence that tries to take over the galaxy.  Certainly even
though I may not know how to achieve it, I want to always have the
goal of writing code that can withstand scrutiny from even that level
of evil entity.

> There is a possible theoretical justification for this observation.
> Data collected  in the context of type inference* research in dynamic
> languages suggests that the vast majority of data flow paths in
> programs, even in highly dynamic languages like Smalltalk and Self,
> are monomorphic.  

This is kind of interesting, but if the vast majority of that code can
be type-inferred, then why stop there, instead of building the type
inference into the compiler and flagging any problems?

> It's completely reasonable for the prospective buyer to then say
> "well, I'd find this new widget useful if it did X, which my existing
> widget doesn't do, but since it can't, what's the point?"

Well, one thing that got me interested in these statically typed
languages (besides freakish paranoia) was seeing their users beat Lisp
users in the ICFP programming contest year after year after year.  The
static language users simply get stuff done more effectively.  I used
to be a Lisper too, but I think at this point that the widget needing
justification is Lisp.
From: Nicolas Neuss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87wsw7q1om.fsf@ma-patru.mathematik.uni-karlsruhe.de>
Paul Rubin <·············@NOSPAM.invalid> writes:

> This is kind of interesting, but if the vast majority of that code can
> be type-inferred, then why stop there, instead of building the type
> inference into the compiler and flagging any problems?

This is exactly the approach taken by some Common Lisp (CL)
implementations.  Those problems are flagged as warnings at compile time.
(I don't know what Rayiner Hashem is using, probably not CMUCL/SBCL,
because he constantly ignores that CL implementations do compile time type
inference as well.)  Could you tell me what is wrong with that?  Does it
have to be a compile time error instead of a warning to make you happy?
Does it have to be forced into a new CL standard that the compiler has to
do this and that analysis?

> > It's completely reasonable for the prospective buyer to then say "well,
> > I'd find this new widget useful if it did X, which my existing widget
> > doesn't do, but since it can't, what's the point?"
> 
> Well, one thing that got me interested in these statically typed
> languages (besides freakish paranoia) was seeing their users beat Lisp
> users in the ICFP programming contest year after year after year.  The
> static language users simply get stuff done more effectively.  I used
> to be a Lisper too, but I think at this point that the widget needing
> justification is Lisp.

I think it depends very much on which level of programmer participates.
Some years ago, I looked and saw that for the winning language a team
consisting of the language implementors participated (was it OCaml?).  On
the other hand, it seems that Common Lisp gurus (Kent Pitman, Peter Norvig,
Paul Graham, ...) or CL implementors (Duane Rettig, Christophe Rhodes, ...)
have better things to do with their time.

Nicolas
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x643rajfu.fsf@ruckus.brouhaha.com>
Nicolas Neuss <········@mathematik.uni-karlsruhe.de> writes:
> This is exactly the approach taken by some Common Lisp (CL)
> implementations.  Those problems are flagged as warnings at compile time.
> (I don't know what Rayiner Hashem is using, probably not CMUCL/SBCL,
> because he constantly ignores that CL implementations do compile time type
> inference as well.)  Could you tell me what is wrong with that?  Does it
> have to be a compile time error instead of a warning to make you happy?

It's ok if it's just a warning, as long as 1) the coverage is complete,
partial inference doesn't count; 2) the coding standard is that code
can't actually be checked into a production build if the compiler
emits any warning messages (you can still develop and debug with these
warnings happening); 3) turning off the messages with compiler
directives is not allowed--you have to actually fix the code.  But
then you've got a statically typed language.

Or at the very least, turning off any runtime type checking or
subscript checking in production code is absolutely forbidden, and
running with the checks turned on is only allowed in programs that are
permitted to crash if something unexpected happens (i.e. shutting down
is often acceptable while keeping on running with incorrect results is
unacceptable).  

> I think it depends very much on which level of programmer participates.
> Some years ago, I looked and saw that for the winning language a team
> consisting of the language implementors participated (was it OCaml?).

I'm not aware of this but it's possible.

> On the other hand, it seems that Common Lisp gurus (Kent Pitman,
> Peter Norvig, Paul Graham, ...) or CL implementors (Duane Rettig,
> Christophe Rhodes, ...)  have better things to do with their time.

Well, Guy Steele is working on Java now, IIRC.  That's about as far
from Lisp as one can get.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-935A96.10524107082007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Nicolas Neuss <········@mathematik.uni-karlsruhe.de> writes:
> > This is exactly the approach taken by some Common Lisp (CL)
> > implementations.  Those problems are flagged as warnings at compile time.
> > (I don't know what Rayiner Hashem is using, probably not CMUCL/SBCL,
> > because he constantly ignores that CL implementations do compile time type
> > inference as well.)  Could you tell me what is wrong with that?  Does it
> > have to be a compile time error instead of a warning to make you happy?
> 
> It's ok if it's just a warning, as long 1) the coverage is complete,
> partial inference doesn't count; 2) the coding standard is that code
> can't actually be checked into a production build if the compiler
> emits any warning messages (you can still develop and debug with these
> warnings happening); 3) turning off the messages with compiler
> directives is not allowed--you have to actually fix the code.  But
> then you've got a statically typed language.
> 
> Or at the very least, turning off any runtime type checking or
> subscript checking in production code is absolutely forbidden, and
> running with the checks turned on is only allowed in programs that are
> permitted to crash if something unexpected happens (i.e. shutting down
> is often acceptable while keeping on running with incorrect results is
> unacceptable).  
> 
> > I think it depends very much on which level of programmer participates.
> > Some years ago, I looked and saw that for the winning language a team
> > consisting of the language implementors participated (was it OCaml?).
> 
> I'm not aware of this but it's possible.
> 
> > On the other hand, it seems that Common Lisp gurus (Kent Pitman,
> > Peter Norvig, Paul Graham, ...) or CL implementors (Duane Rettig,
> > Christophe Rhodes, ...)  have better things to do with their time.
> 
> Well, Guy Steele is working on Java now, IIRC.  That's about as far
> from Lisp as one can get.

He has been working on all kinds of stuff. HPF, Fortress, ...

-- 
http://lispm.dyndns.org
From: Nicolas Neuss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87ejifk02a.fsf@ma-patru.mathematik.uni-karlsruhe.de>
Paul Rubin <·············@NOSPAM.invalid> writes:

> It's ok if it's just a warning, as long 1) the coverage is complete,
> partial inference doesn't count; 2) the coding standard is that code
> can't actually be checked into a production build if the compiler emits
> any warning messages (you can still develop and debug with these warnings
> happening); 3) turning off the messages with compiler directives is not
> allowed--you have to actually fix the code.  But then you've got a
> statically typed language.

OK, then I am definitely more liberal than you are (but wasn't it you who
worked also with Python?).  I would be completely happy if some Common Lisp
coworker and guru would make good use of very advanced dynamicity, as long
as my compiler also emits warnings about suspicious constructs at a
lower level.

Nicolas
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186511485.093391.53270@o61g2000hsh.googlegroups.com>
> (I don't know what Rayiner Hashem is using, probably not CMUCL/SBCL,
> because he constantly ignores that CL implementations do compile time type
> inference as well.)

I use SBCL, so I'm not really sure what you're talking about.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bhgeum29l8203@corp.supernews.com>
Nicolas Neuss wrote:
> I think it depends very much on which level of programmer participates.

The assertion that all the best Lispers have moved onto OCaml and Haskell is
certainly corroborated by that evidence.

> Some years ago, I looked and saw that for the winning language a team
> consisting of the language implementors participated (was it OCaml?).  On
> the other hand, it seems that Common Lisp gurus (Kent Pitman, Peter
> Norvig, Paul Graham, ...) or CL implementors (Duane Rettig, Christophe
> Rhodes, ...) have better things to do with their time.

Just as Stephen Hawking is theoretically good at the long jump.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Nicolas Neuss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87abt279fo.fsf@ma-patru.mathematik.uni-karlsruhe.de>
Jon Harrop <···@ffconsultancy.com> writes:

> Nicolas Neuss wrote:
> > I think it depends very much on which level of programmer participates.
> 
> The assertion that all the best Lispers have moved onto OCaml and Haskell
> is certainly corroborated by that evidence.

Could you name a few who will acknowledge that they have been "best Lisper"
and are now deep into OCaml/Haskell?

Nicolas

--
- Hey, I've heard that Jon Harrop has written a new book!
- Oh?  What title?  Is it "The Final Battle: F# against Ocaml"?
- No.  It aspires to Knuth's seminal work and is called "The Art of
  Usenet Spamming". 
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bkammd0l7ce4@corp.supernews.com>
Nicolas Neuss wrote:
> Jon Harrop <···@ffconsultancy.com> writes:
>> Nicolas Neuss wrote:
>> > I think it depends very much on which level of programmer participates.
>> 
>> The assertion that all the best Lispers have moved onto OCaml and Haskell
>> is certainly corroborated by that evidence.
> 
> Could you name a few who will acknowledge that they have been "best
> Lisper" and are now deep into OCaml/Haskell?

Aside from Matthias and Joachim, the creators of SML, OCaml, Haskell and F#
would be a good start.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-713C64.22540308082007@news-europe.giganews.com>
In article <··············@corp.supernews.com>,
 Jon Harrop <···@ffconsultancy.com> wrote:

> Nicolas Neuss wrote:
> > Jon Harrop <···@ffconsultancy.com> writes:
> >> Nicolas Neuss wrote:
> >> > I think it depends very much on which level of programmer participates.
> >> 
> >> The assertion that all the best Lispers have moved onto OCaml and Haskell
> >> is certainly corroborated by that evidence.
> > 
> > Could you name a few who will acknowledge that they have been "best
> > Lisper" and are now deep into OCaml/Haskell?
> 
> Aside from Matthias and Joachim, the creators of SML, OCaml, Haskell and F#
> would be a good start.

Joachim who?

Who are the others? Do you have names?

-- 
http://lispm.dyndns.org
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9hd8k$obe$1@online.de>
Jon Harrop schrieb:
> Aside from Matthias and Joachim, the creators of SML, OCaml, Haskell and F#
> would be a good start.

I have done some intense Lisp for a year, but that's been more than a 
decade ago; Common Lisp and Scheme were relatively new then, but I had 
no opportunity to use them.
My CLOS and Scheme knowledge is limited to reading up in the specs 
("reading up" can be relatively thorough when I do it, but I stopped 
after finding too many things I didn't like).

So I'd classify myself more as a deliberate non-Lisper, rather than as a 
"best Lisper".

Regards,
Jo
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5i2upcF3nd3e9U1@mid.individual.net>
Joachim Durchholz wrote:
> Jon Harrop schrieb:
>> Aside from Matthias and Joachim, the creators of SML, OCaml, Haskell 
>> and F#
>> would be a good start.
> 
> I have done some intense Lisp for a year, but that's been more than a 
> decade ago; Common Lisp and Scheme were relatively new then, but I had 
> no opportunity to use them.
> My CLOS and Scheme knowledge is limited to reading up in the specs 
> ("reading up" can be relatively thorough when I do it, but I stopped 
> after finding too many things I didn't like).

https://lists.csail.mit.edu/pipermail/ll-discuss/2007-February/001186.html


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural     language Minim
Date: 
Message-ID: <f9hl6i$4hi$1@online.de>
Pascal Costanza schrieb:
> Joachim Durchholz wrote:
>>
>> My CLOS and Scheme knowledge is limited to reading up in the specs 
>> ("reading up" can be relatively thorough when I do it, but I stopped 
>> after finding too many things I didn't like).
> 
> https://lists.csail.mit.edu/pipermail/ll-discuss/2007-February/001186.html

I knew that Clos has this kind of potential. Similar stuff could be done 
in Smalltalk, for example - and I knew (at least in principle) how to do 
that kind of stuff even in Lisp.
What I didn't like was that it gave you all the power, but none of the 
control.

One example:
There were mechanisms for modifying the way that inheritance would work. 
That's immensely powerful, and it can save your butt, of course.
However, it's also too easy to abuse. Write subclasses for a third-party 
library and see stuff break because that library is using these 
mechanisms in unexpected ways. See your code break when the library is 
upgraded and changes the way it uses these mechanisms.

It's this kind of stuff that I mean when I say "lots of power, little 
control".

Regards,
Jo
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-DFF6C0.11150807082007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Rayiner Hashem <·······@gmail.com> writes:
> > Given correct
> > constructors and closed manipulation functions, the increased
> > probability of runtime error in dynamically-typed languages stems from
> > the fact that type flows in dynamically-typed languages are verified
> > at run-time, while type-flows in statically-typed languages are
> > verified at compile-time.
> 
> Actually every serious Lisp compiler I know of for general purpose
> computers has a way to turn off the runtime checks for speed, and
> programs running with the checks turned off are not verified at all.
> The Lisp code at that point is no different than C code (argggh!!!).

Minus SBCL and CMUCL.
 
> > Dynamic-typing proponents have pointed out that they, in practice, do
> > not often encounter type errors that escape their testing frameworks.
> 
> Right, but that only shows that the test cases don't uncover any
> errors.  It doesn't show that there are no errors.

For many software development the task is to get working
software and not 'perfect software'. A static type
check also can't show that there are no errors. It
even can't show that there are no type errors, for example
since the compiler (unless it is a verified compiler on a verified
platform) has bugs, too. I would not trust any of the
current research compilers at all. Lots of complicated
code very few people understand. Sounds like a recipe for
disaster.

> I take the view that I've been in this field long enough to think that
> while I'm hopefully not a total moron in the overall scheme of things,
> there are many other folks out there who are a heck of a lot smarter
> than I am.  The same is true for everyone in this discussion, unless
> you have a Nobel prize or Fields medal that you're haven't told us
> about.
> 
> Therefore, just because I can't think up suitably devious tests to
> uncover any type errors I might have made, it doesn't mean there's no
> diabolical genius out there who can find a subtle error and use it
> maliciously against my program, especially since I had to develop and
> test the program on a budget and to a schedule, while the bad guy can
> spend years and millions finding the error.  Therefore, IMO, "this
> function returns an integer for every test input that I happened to
> think of" is a much less valuable proposition than "this function
> returns an integer for every input as shown by static analysis".  And
> we are all in the same boat.
> 
> Did you ever read "A Fire Upon The Deep"?  It's a fantastically
> excellent SF novel; I don't do it the slightest bit of justice by
> describing it as being about a malicious computer virus with godlike
> intelligence that tries to take over the galaxy.  Certainly even
> though I may not know how to achieve it, I want to always have the
> goal of writing code that can withstand scrutiny from even that level
> of evil entity.
> 
> > There is a possible theoretical justification for this observation.
> > Data collected  in the context of type inference* research in dynamic
> > languages suggests that the vast majority of data flow paths in
> > programs, even in highly dynamic languages like Smalltalk and Self,
> > are monomorphic.  
> 
> This is kind of interesting, but if the vast majority of that code can
> be type-inferred, then why stop there, instead of building the type
> inference into the compiler and flagging any problems?
> 
> > It's completely reasonable for the prospective buyer to then say
> > "well, I'd find this new widget useful if it did X, which my existing
> > widget doesn't do, but since it can't, what's the point?"
> 
> Well, one thing that got me interested in these statically typed
> languages (besides freakish paranoia) was seeing their users beat Lisp
> users in the ICFP programming contest year after year after year.

The significance of the ICFP is not that high as I see it.
This years contest was probably some fun, but the alien
theme (and the problem) was somehow telling...

>  The
> static language users simply get stuff done more effectively.  I used
> to be a Lisper too, but I think at this point that the widget needing
> justification is Lisp.

Well, I see it this way: Static Functional Programming Languages
are commercially mostly without success. There is no visible commercial
vendor selling a SFPL as product (SML, Haskell, ?).
When Harlequin went away a few years ago, their MLWorks
product disappeared from the market. The Lisp part (LispWorks) still
exists as a commercial entity and seems to do fine.
Most SFPLs have some financing through Universities (the
tax payer pays, that is) or company research (Microsoft).
Those groups have a pressure to demonstrate something,
even if it is a contest. Plus these groups have young, very
smart people with lots of time... 

OTOH, there is less research on dynamic languages now (there still is)
and the funding seems to be more diverse.

I really would prefer if some of the bright people at Microsoft
Research were working on making MS Office better with SFPLs, than
doing contest puzzles (this years ICFP seems to be more
of an intelligence test, than a contest for 'Functional Programming').

-- 
http://lispm.dyndns.org
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xejifvgsn.fsf@ruckus.brouhaha.com>
Rainer Joswig <······@lisp.de> writes:
> For many software development the task is to get working
> software and not 'perfect software'. A static type
> check also can't show that there are no errors. It
> even can't show that there are no type errors, for example
> since the compiler (unless it is a verified compiler on a verified
> platform) has bugs, too. I would not trust any of the
> current research compilers at all. Lots of complicated
> code very few people understand. Sounds like a recipe for
> disaster.

How about VLisp?  It's even dynamic, sort of.  (Scheme subset).

> The significance of the ICFP is not that high as I see it.
> This years contest was probably some fun, but the alien
> theme (and the problem) was somehow telling...

Here are some slashdot comments:

    http://developers.slashdot.org/comments.pl?sid=173121&cid=14406160
    http://developers.slashdot.org/comments.pl?sid=173121&cid=14406569

    "A good example of people writing complex but bug-free software
    under time pressure is the annual ICFP Programming Contest. This
    contest runs over three days, the tasks are complex enough that
    you usually need to write 2000 - 3000 lines of code to tackle
    them, and the very first thing the judges do is to throw
    corner-cases at the programs in an effort to find bugs. Any
    incorrect result or crash and you're out of the contest
    instantly. After that, the winner is generally the
    highest-performing of the correct programs.

    Each year, up to 90% of the entries are eliminated in the first
    round due to bugs, usually including almost all the programs
    written in C and C++ and Java. Ocassionally, a C++ program will
    get through and may do well -- even win, as in 2003 when you
    didn't actually submit your program but ran it yourself (so it
    never saw data you didn't have a chance to fix it for). But most
    of the prize getters year after year seem to use one of three
    not-yet-mainstream languages: [Dylan, Haskell, Ocaml]."

> Those groups have a pressure to demonstrate something,
> even if it is a contest. Plus these groups have young, very
> smart people with lots of time... 

But why aren't they using Lisp?

> OTOH, there is less research on dynamic languages now (there still is)
> and the funding seems to be more diverse.

People keep recommending the CTM book, which uses Mozart/Oz, so maybe
that qualifies.

> I really would prefer if some of the bright people at Microsoft
> Research were working on making MS Office better with SFPLs, than
> doing contest puzzles (this years ICFP seems to be more
> of an intelligence test, than a contest for 'Functional Programming').

Apparently that SFPL stuff has made its way into C# and VB9...
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-28DC9C.11464807082007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Rainer Joswig <······@lisp.de> writes:
> > For many software development the task is to get working
> > software and not 'perfect software'. A static type
> > check also can't show that there are no errors. It
> > even can't show that there are no type errors, for example
> > since the compiler (unless it is a verified compiler on a verified
> > platform) has bugs, too. I would not trust any of the
> > current research compilers at all. Lots of complicated
> > code very few people understand. Sounds like a recipe for
> > disaster.
> 
> How about VLisp?  It's even dynamic, sort of.  (Scheme subset).

Yeah, what about it? I know that people have been
working in this field. Have not seen a sign of life in
VLisp for some time - though I have not watched that
closely. There were/are other attempts also. Like
the Verifix project in Germany working on a verified subset
of Common Lisp. Some years ago...


> 
> > The significance of the ICFP is not that high as I see it.
> > This years contest was probably some fun, but the alien
> > theme (and the problem) was somehow telling...
> 
> Here are some slashdot comments:
> 
>     http://developers.slashdot.org/comments.pl?sid=173121&cid=14406160
>     http://developers.slashdot.org/comments.pl?sid=173121&cid=14406569
> 
>     "A good example of people writing complex but bug-free software
>     under time pressure is the annual ICFP Programming Contest. This
>     contest runs over three days, the tasks are complex enough that
>     you usually need to write 2000 - 3000 lines of code to tackle
>     them, and the very first thing the judges do is to throw
>     corner-cases at the programs in an effort to find bugs. Any
>     incorrect result or crash and you're out of the contest
>     instantly. After that, the winner is generally the
>     highest-performing of the correct programs.
> 
>     Each year, up to 90% of the entries are eliminated in the first
>     round due to bugs, usually including almost all the programs
>     written in C and C++ and Java. Ocassionally, a C++ program will
>     get through and may do well -- even win, as in 2003 when you
>     didn't actually submit your program but ran it yourself (so it
>     never saw data you didn't have a chance to fix it for). But most
>     of the prize getters year after year seem to use one of three
>     not-yet-mainstream languages: [Dylan, Haskell, Ocaml]."

Sure, we know Bruce. ;-) Marketing speech from a Dylan guy.
Dylan also got lots of funding and went nowhere.
 
> > Those groups have a pressure to demonstrate something,
> > even if it is a contest. Plus these groups have young, very
> > smart people with lots of time... 
> 
> But why aren't they using Lisp?

Last I looked these language research groups were working on SFPLs.
Kind of 'basic research' I guess.

> > OTOH, there is less research on dynamic languages now (there still is)
> > and the funding seems to be more diverse.
> 
> People keep recommending the CTM book, which uses Mozart/Oz, so maybe
> that qualifies.

This one too: http://www.htdp.org/
 
> > I really would prefer if some of the bright people at Microsoft
> > Research were working on making MS Office better with SFPLs, than
> > doing contest puzzles (this years ICFP seems to be more
> > of an intelligence test, than a contest for 'Functional Programming').
> 
> Apparently that SFPL stuff has made its way into C# and VB9...

-- 
http://lispm.dyndns.org
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <8p3ayv4boi.fsf@hod.lan.m-e-leypold.de>
Paul Rubin wrote:

> I take the view that I've been in this field long enough to think that
> while I'm hopefully not a total moron in the overall scheme of things,
> there are many other folks out there who are a heck of a lot smarter
> than I am.  

The smart people (like M Blume) have already left the discussion.
I assume they have better things to do.

> Did you ever read "A Fire Upon The Deep"?  It's a fantastically
> excellent SF novel; 

> I don't do it the slightest bit of justice by describing it as being
> about a malicious computer virus with godlike intelligence that
> tries to take over the galaxy.

No, you don't :-). "A Fire Upon The Deep" goes way _beyond_ :-)
computer viruses.

> Certainly even though I may not know how to achieve it, I want to
> always have the goal of writing code that can withstand scrutiny
> from even that level of evil entity.

Too much mysticism, Paul. :-) The book is good, but probably not the
proper guidance to orient one's career towards.

Regards -- Markus

PS: I'm all for discussing SF now instead of static vs. dynamic
typing. BTW "A Fire Upon The Deep" has a rather nice usenet
parody/analogue in it which really really reminds me of the real
thing: People one never has heard before suddenly coming out of the
shadows with outrageous claims and giving themselves important names
("The XYZ study group") etc. 
From: Paul Rubin
Subject: Fire upon the deep (escape from shootout: ...)
Date: 
Message-ID: <7xy7gmwk4k.fsf_-_@ruckus.brouhaha.com>
·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) writes:
> Too much mysticism, Paul. :-) The book is good, but probably not the
> proper guidance to orient ones career towards.
> 
> Regards -- Markus
> 
> PS: I'm all for discussing SF now instead of static vs. dynamic
> typing. BTW "Fire upon the Depth" has a rather nice usenet
> parody/analogue in it which really really reminds me of the real
> thing: People one never has heard before suddenly coming out of the
> shadows with outrageous claims and giving themselves important names
> ("The XYZ study group") etc. 

Heh, yeah, it was even called the "net of a thousand lies".  There was
also a side reference to a lot of interstellar travel being devoted to
transporting one-time-pad cryptographic material around the galaxy,
since cryptographic algorithms were useless and they needed a way to
communicate securely, but that most everything else was too expensive
to haul around instead of producing locally.  It also mentioned that
it was humorous that anyone had ever actually believed in public-key
cryptography.

I took that to mean that in the faster-than-light zone (the "Beyond"),
it's likely possible to solve NP-hard problems efficiently, and it
seemed to me that in the Transcend (where the Blight came from,
if anyone is reading this who hasn't read the book) it might be
possible to solve PSPACE-hard problems efficiently.  The remark about
public key crypto may mean even here in the slow zone,
NP-intersect-coNP sits inside P.  Someone once asked the author about
the crypto stuff and he said it was just an offhand remark tossed into
the book, and he wasn't really trying to express anything deep.

So you're right, I'm not trying to really program against the Blight;
I'm ok with the usual model (e.g. Goldreich "Foundations of
Cryptography" vol. 1) that the opponent is limited to probabilistic
P-time computations and that certain known primitives really do have
the properties claimed for them (e.g. AES is a pseudorandom
permutation).  Of course that last part may be more mystical and
wishful than the science fiction stuff.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186511230.162192.71430@g4g2000hsf.googlegroups.com>
 > Actually every serious Lisp compiler I know of for general purpose
> computers has a way to turn off the runtime checks for speed, and
> programs running with the checks turned off are not verified at all.
> The Lisp code at that point is no different than C code (argggh!!!).

This is an implementation issue more than anything else. You don't
_need_ to turn off the runtime checking to get adequate performance
out of Lisp. You just need good compiler technology to squeeze out the
tests.

> Right, but that only shows that the test cases don't uncover any
> errors.  It doesn't show that there are no errors.

My point was that experience shows that it's not common for type
errors to show up in usage that aren't caught in testing. Yes, this
does not guarantee that there are no type errors, but if they don't
show up in practice, who cares about such guarantees?

> This is kind of interesting, but if the vast majority of that code can
> be type-inferred, then why stop there, instead of building the type
> inference into the compiler and flagging any problems?

Because it's very difficult to reconcile a type analysis algorithm
with the kind of dynamism Lisp programmers want. Yes, an expensive
whole-program analysis
http://groups.google.com/group/comp.lang.lisp/browse_thread/thread/7b1ab36f5d5cce0a/871740da88ecd295
can prove most of the code monomorphic, but the second you create a new
class or evaluate a new expression, you have to rerun that analysis.

The technology is certainly there today to build some really powerful
static type-checking tools for Lisp (or Smalltalk or Python, etc). The
fact that nobody has done so suggests that people really aren't
suffering from the lack of static type safety.

> Well, one thing that got me interested in these statically typed
> languages (besides freakish paranoia) was seeing their users beat Lisp
> users in the ICFP programming contest year after year after year.

Speaking of toy programs...
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bhihjiln4dt10@corp.supernews.com>
Rayiner Hashem wrote:
> This is an implementation issue more than anything else. You don't
> _need_ to turn off the runtime checking to get adequate performance
> out of Lisp. You just need good compiler technology to squeeze out the
> tests.

Then you'll be able to make this Lisp ray tracer safer and faster to the
extent that it is competitive:

  http://www.ffconsultancy.com/languages/ray_tracer/

>> Right, but that only shows that the test cases don't uncover any
>> errors.  It doesn't show that there are no errors.
> 
> My point was that experience shows that it's not common for type
> errors to show up in usage that aren't caught in testing. Yes, this
> does not guarantee that there are no type errors, but if they don't
> show up in practice, who cares about such guarantees?

Even if they didn't show up in practice (which is contrary to statements
from several people here), it would still accelerate development because
running a static type checker is faster than designing, implementing and
running unit tests.

> The technology is certainly there today to build some really powerful
> static type-checking tools for Lisp (or Smalltalk or Python, etc). The
> fact that nobody has done so suggests that people really aren't
> suffering from the lack of static type safety.

On the contrary, all modern functional programming languages evolved from
Lisp and static type systems are a ubiquitous theme among them: SML, CAML,
OCaml, Haskell, F#...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186530498.347908.27360@w3g2000hsg.googlegroups.com>
> Then you'll be able to make this Lisp ray tracer safer and faster to the
> extent that it is competitive:
>
>  http://www.ffconsultancy.com/languages/ray_tracer/

Ah, shoddy data analysis at its best. You have languages mixed
together with implementations, you have on-line compilers mixed
together with off-line ones, you've got whole-program optimizers mixed
in with local ones, and you've got a whole host of implementation-
quality factors mixed-in. You're not actually testing anything other
than how these particular implementations perform on these particular
benchmarks. And I don't mean this in the abstract sense of "you can
never test anything but a particular implementation of a language",
but rather the concrete sense of "your implementations have so many
other variables that language-differences are in the noise".

Have you done any detailed profiling to figure out exactly what you're
testing? Because you've got the most dynamic language (scheme with no
type declarations) running right behind gcc, and ahead of highly
static languages (ML, Haskell). You've got 1 implementation of SML
outperforming another implementation of SML by about the same margin
as O'Caml outperforms Lisp. Whatever conclusions you can draw from
this data, "dynamically-typed languages are inherently slower than
statically-typed ones" is not it.

My guess is what you're really measuring is the quality of the code
generator/register allocator. This would explain why SBCL and MLton
perform similarly (both have fairly naive code generators), while
O'Caml performs so well (O'Caml's code generator is legendary). It
would also explain why Stalin is about as fast as GCC (it uses GCC as
its code generator). I can't explain why MLton is 50% faster than SML/
NJ, despite compiling the same language, but looking at the consing
rate of the SML/NJ program might help (this is often SML/NJ's weak-
spot).

> Even if they didn't show up in practice (which is contrary to statements
> from several people here),

And in accordance with the statements of several other people here...

> it would still accelerate development because
> running a static type checker is faster than designing, implementing and
> running unit tests.

Ah yes, because static type checking obviates the need for unit-
testing...

> On the contrary, all modern functional programming languages evolved from
> Lisp and static type systems are a ubiquitous theme among them: SML, CAML,
> OCaml, Haskell, F#...

Typed functional programming languages parted ways from Lisp with ML
in the 1970s. Meanwhile, Smalltalk and Self, which arose after ML,
both chose to retain dynamic typing, even expanding their role. Modern
mainstream languages (C#/Java) chose to retain a Smalltalk-inspired
model of OOP, instead of a more static object system along the lines
of something like O'Caml.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bk9uci2mjhkc1@corp.supernews.com>
Rayiner Hashem wrote:
> You're not actually testing anything other
> than how these particular implementations perform on these particular
> benchmarks.

I'm not sure what else you were expecting...

> And I don't mean this in the abstract sense of "you can 
> never test anything but a particular implementation of a language",
> but rather the concrete sense of "your implementations have so many
> other variables that language-differences are in the noise".

You may substantiate that by benchmarking the same programs to get wildly
different results.

> Have you done any detailed profiling to figure out exactly what you're
> testing? Because you've got the most dynamic language (scheme with no
> type declarations)

and compiled into a completely inextensible form.

> running right behind gcc, and ahead of highly static languages (ML,
> Haskell). 

Haskell is the only lazy language in this benchmark.

> You've got 1 implementation of SML outperforming another implementation of
> SML 

The static MLton outperforming the dynamic (interactive) SMLNJ.

> Whatever conclusions you can draw from
> this data, "dynamically-typed languages are inherently slower than
> statically-typed ones" is not it.

On the contrary, the only exception to that is a subset of Scheme compiled
for several minutes by the whole-program optimizing compiler Stalin into an
inextensible form. Calling that "dynamic" is a triumph of hope over
reality.

> My guess is what you're really measuring is the quality of the code
> generator/register allocator.

The bottlenecks are different in different languages. The 1st (safe,
dynamic) Lisp is limited by run-time checks and is 4x slower than the 1st
OCaml. The next three Lisp implementations are limited by the performance
of the run time (allocator and GC). The final Lisp implementation may well
be limited by the code generator.

> This would explain why SBCL and MLton 
> perform similarly (both have fairly naive code generators), while
> O'Caml performs so well (O'Caml's code generator is legendary).

MLton beat OCaml in 32-bit.

> It 
> would also explain why Stalin is about as fast as GCC (it uses GCC as
> its code generator). I can't explain why MLton is 50% faster than SML/
> NJ, despite compiling the same language, but looking at the consing
> rate of the SML/NJ program might help (this is often SML/NJ's weak-
> spot).

The whole-program optimizing compilers MLton and Stalin both do
disproportionately well thanks to the static analysis they can do.

>> it would still accelerate development because
>> running a static type checker is faster than designing, implementing and
>> running unit tests.
> 
> Ah yes, because static type checking obviates the need for unit-
> testing...

Most of it, yes.

>> On the contrary, all modern functional programming languages evolved from
>> Lisp and static type systems are a ubiquitous theme among them: SML,
>> CAML, OCaml, Haskell, F#...
> 
> Typed functional programming languages parted ways from Lisp with ML
> in the 1970s. Meanwhile, Smalltalk and Self, which arose after ML,
> both chose to retain dynamic typing, even expanding their role.

Yes, the vast majority of new languages are dynamically typed. Indeed, most
of the languages I have created were dynamically typed.

> Modern 
> mainstream languages (C#/Java) chose to retain a Smalltalk-inspired
> model of OOP, instead of a more static object system along the lines
> of something like O'Caml.

Java, C# and Visual BASIC have also adopted statically-typed parametric
polymorphism. Indeed, the .NET implementation was created by the same
person who is now bringing us Microsoft's full-fledged statically-typed
functional programming language F#.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186625928.720159.27190@19g2000hsx.googlegroups.com>
I cut a lot of stuff out because I wanted you to focus on a basic
point:

> of the run time (allocator and GC). The final Lisp implementation may well
> be limited by the code generator.

Among other things. Look, the basic point you're missing is that
you're testing completely different implementations and then claiming
that it proves something about static versus dynamic typing. SBCL is a
dynamic compilation environment that has to assume functions and
classes can be redefined at runtime. It compiles a language that
doesn't compromise its integer types to fit primitive machine types.
It limits its analysis to keep interactive compilation speeds. None of
these things are intrinsic to dynamic typing, but they make
compilation much harder.

Moreover, SBCL's compiler was started in 1985, and most of the
technology is late 1980's, early 1990's stuff. Meanwhile, MLton was
started in 1997, and uses a lot of modern technology and a lot of
expensive whole-program analysis. SML/NJ is older, but its MLRISC back-
end is state of the art, and is written by some of the same people
that invented some modern optimization techniques. I don't know the
history of the O'Caml compiler, but O'Caml wasn't even created until
the CMUCL project had been over for a couple of years. I don't in
general subscribe to the idea that "newer is better", but the simple
fact is that a lot of optimization technology was created between
1985-1994 and today, and while there is no reason SBCL couldn't use
it, it just doesn't.

Even given these facts, it's incredible how well SBCL does. It beats
Java despite the fact that the JVM has some of the smartest people in
code optimization working on it (to be fair, as a JIT, it also has to
do its optimization under ridiculous time constraints!) It beats SML/
NJ, which is a similarly impressive feat.

It's also interesting that you dismiss off-hand the performance of
Stalin. Perhaps you don't really understand that Stalin versus GCC is
really the closest comparison of the performance potential of dynamic
languages that you have.

> MLton beat OCaml in 32-bit.

Ah right, I was confused by the lack of a 64-bit MLton result.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bmjvsf18j33e5@corp.supernews.com>
Rayiner Hashem wrote:
>> of the run time (allocator and GC). The final Lisp implementation may
>> well be limited by the code generator.
> 
> Among other things. Look, the basic point you're missing is that
> you're testing completely different implementations and then claiming
> that it proves something about static versus dynamic typing.

I made a testable hypothesis and I tested it. The hypothesis was not
disproven: it stands.

> SBCL is a dynamic compilation environment that has to assume functions and
> classes can be redefined at runtime.

The performance critical functions were inlined and cannot be changed.

> It compiles a language that doesn't compromise its integer types to fit
> primitive machine types. 

The integers are declared to be fixnums in performance-critical code and
SBCL can optimize them accordingly.

> It limits its analysis to keep interactive compilation speeds.

Declarations in the code request maximal static optimization. Yet SML/NJ and
OCaml are faster at compiling.

There are also counter points. For example, the SML and OCaml are safe but
the Lisp is not (except for the first implementation).

> Moreover, SBCL's compiler was started in 1985, and most of the
> technology is late 1980's, early 1990's stuff. Meanwhile, MLton was
> started in 1997, and uses a lot of modern technology and a lot of
> expensive whole-program analysis. SML/NJ is older, but its MLRISC back-
> end is state of the art, and is written by some of the same people
> that invented some modern optimization techniques. I don't know the
> history of the O'Caml compiler, but O'Caml wasn't even created until
> the CMUCL project had been over for a couple of years. I don't in
> general subscribe to the idea that "newer is better", but the simple
> fact is that a lot of optimization technology was created between
> 1985-1994 and today, and while there is no reason SBCL couldn't use
> it, the just doesn't.

Asserting that dynamic typing might not be slower in theory is untestable.

> Even given these facts, it's incredible how well SBCL does. It beats
> Java despite the fact that the JVM has some of the smartest people in
> code optimization working on it (to be fair, as a JIT, it also has to
> do its optimization under ridiculous time constraints!)

This is contrary to the evidence:

1. The compile times for those languages are all <13% of Java's run times.
2. SML/NJ, ocamlopt and g++ all compile faster than Java.
3. Java has an equal opportunity to statically optimize.

> It's also interesting that you dismiss off-hand the performance of
> Stalin. Perhaps you don't really understand that Stalin versus GCC is 
> really the closest comparison of the performance potential of dynamic
> languages that you have.

Stalin has shown that a suitably written subset of Scheme can be statically
optimized into a very fast implementation with enough effort. Comparing the
inextensible and non-interactive Stalin-compiled Scheme with the
inextensible and non-interactive C++ says little about static vs dynamic
though: both are static.

>> MLton beat OCaml in 32-bit.
> 
> Ah right, I was confused by the lack of a 64-bit MLton result.

There are still no 64-bit optimizing SML compilers AFAIK.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186687791.623577.138690@m37g2000prh.googlegroups.com>
> I made a testable hypothesis and I tested it. The hypothesis was not
> disproven: it stands.

If your hypothesis was "does O'Caml beat SBCL", then yeah, sure. But
you keep trotting out this experiment to assert that "Ocaml is faster
than Lisp" or "static languages are faster than dynamic languages".
That's a broader assertion, and your evidence does not support it.
Heck, the title of the page is even "Ray tracer language comparison"!

> The performance critical functions were inlined and cannot be changed.

Inlining isn't the only issue. A dynamic compilation environment just
cannot use some optimizations because in general it has to support
redefining code. Put another way, it doesn't make sense to implement
certain whole-program optimizations because in your general use-case,
you won't be able to take advantage of them.

> Asserting that dynamic typing might not be slower in theory is untestable.

Yet you repeatedly assert that dynamic typing is indeed slower than
static typing, or at least that Lisp is slower than statically-typed
languages.

In any case, you dodged the real issue, which is that the guts of SML/
NJ, Ocaml, and especially MLton are a lot more modern than the guts of
SBCL. The rule of thumb is that optimization techniques improve code
performance by 2x every 18 years. There is probably a 10-year
difference between the techniques in SBCL and MLton. This equates to
an expected 50% speedup in MLton from improved algorithms alone, which
is more than the 40% difference between 32-bit MLton and 32-bit SBCL.
If you're really comparing languages, as the title of your page
claims, then why do you take no steps to correct for this confounding
factor?

> 1. The compile times for those languages are all <13% of Java's run times.

That doesn't mean anything. The JVM's compiler has to be fast enough
to provide acceptable JIT times on large programs. The same algorithms
are used even if you're compiling a small program like your
benchmark.

> 2. SML/NJ, ocamlopt and g++ all compile faster than Java.
> 3. Java has an equal opportunity to statically optimize.

The Java compiler doesn't do much if any static optimization. It has
to output Java bytecodes, remember?

> Stalin has shown that a suitably written subset of Scheme can be statically
> optimized into a very fast implementation with enough effort.

Stalin does not support a full numeric tower (not an R5RS requirement
anyway), but that just makes its arithmetic model no harder than ML's
or O'Caml's. It doesn't support R5RS macros, but that doesn't impact
performance.

> inextensible and non-interactive Stalin-compiled Scheme with the
> inextensible and non-interactive C++ says little about static vs dynamic
> though: both are static

Actually, it says exactly what you want to know: when I make things as
similar as possible, with the only major difference between dynamic
typing versus static typing, can I get similar performance? And what
do you know: you can!
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bmuiqbs8t6g9f@corp.supernews.com>
Rayiner Hashem wrote:
>> The performance critical functions were inlined and cannot be changed.
> 
> Inlining isn't the only issue.

It was enough to invalidate your point.

> A dynamic compilation environment just 
> cannot use some optimizations because in general it has to support
> redefining code.

That explains Lisp's poor performance.

> The rule of thumb is that optimization techniques improve code
> performance by 2x every 18 years...

Eh? For what code and where is any evidence of this?

>> 1. The compile times for those languages are all <13% of Java's run
>> times.
> 
> That doesn't mean anything.

It means that Java had every chance to perform well but it didn't.

> The JVM's compiler has to be fast enough  
> to provide acceptable JIT times on large programs. The same algorithms
> are used even if you're compiling a small program like your
> benchmark.

Java might give more competitive performance on larger programs. I suspect
not, but have no evidence either way.

>> 2. SML/NJ, ocamlopt and g++ all compile faster than Java.
>> 3. Java has an equal opportunity to statically optimize.
> 
> The Java compiler doesn't do much if any static optimization.

That explains Java's poor performance.

> It has to output Java bytecodes, remember?

So do C# and F#, both of which are significantly faster than Java.

>> inextensible and non-interactive Stalin-compiled Scheme with the
>> inextensible and non-interactive C++ says little about static vs dynamic
>> though: both are static
> 
> Actually, it says exactly what you want to know: when I make things as
> similar as possible, with the only major difference between dynamic
> typing versus static typing, can I get similar performance? And what
> do you know: you can!

If you call two orders of magnitude slower compilation "similar".

The conclusion I would draw is: whole program optimizers can transform
dynamic programs into static programs and regain performance. I think that
is interesting but of little practical relevance because the advantages of
dynamic typing are lost. Moreover, we do not know if the optimizations
performed scale to larger programs, i.e. are they non-linear?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186698275.860365.166130@e9g2000prf.googlegroups.com>
> It was enough to invalidate your point.

No it isn't. Interactive compilation is a design constraint that
filters through the whole system, regardless of whether in any
particular benchmark you manage to inline most of the functions.

> That explains Lisp's poor performance.

Getting within a factor of 2 of GCC is not "poor" by any stretch of
the imagination. That's like the difference between GCC -O1 and GCC -
O2 on a lot of codes...

> Eh? For what code and where is any evidence of this?

http://citeseer.ist.psu.edu/446305.html

Look, the CMUCL project ended in 1994. Want to dig up a copy of Visual
C++ 1.5 and test it against that?

> It means that Java had every chance to perform well but it didn't.

You're assuming that the JVM should somehow detect that its compiling
a small toy program, and use more expensive optimization techniques
than it would in real code.

> Java might give more competitive performance on larger programs. I suspect
> not, but have no evidence either way.

It won't give any more competitive performance on larger programs. The
optimizer is what it is. It's designed to be fast enough to JIT a big
program in adequate time, and the code is as good as it can do with
that constraint.

> That explains Java's poor performance.

The JVM is working with design constraints the others aren't. Its
performance in the context of those design constraints is really quite
impressive.

> So do C# and F#, both of which are significantly faster than Java.

I haven't seen any benchmarks, so I couldn't say. Microsoft's compiler
team is really good, though, so I'm not really surprised.

> If you call two orders of magnitude slower compilation "similar".

That's an engineering issue, not a fundamental weakness of the
technique.

> is interesting but of little practical relevance because the advantages of
> dynamic typing are lost.

Only if all of your code needs 100% performance. This is rare. For
most large programs, only 10% is performance-sensitive. If that 10% is
written in a static style, the compiler can extract good performance,
and allow the rest to be written in a more dynamic style. Also, you
ignore the fact that code may be dynamic, but a particular use of the
code may not be. Eg: consider something like a AWT, where you want the
flexibility to have things like WindowsButton and GtkButton, but where
any given compile will only use one or the other.

> Moreover, we do not know if the optimizations
> performed scale to larger programs, i.e. are they non-linear?

AFAIK, Stalin's analyses are based on Shivers's 0cfa algorithm, which
is cubic in the whole program. However, there are ways to make it run
fast in practice. Wang uses online cycle elimination to type 20 KLOC
of Java code in 5-10 seconds. There is also an implementation of
Andersen's algorithm (very similar to 0cfa in principle), which can do
pointer analysis for 1 MLOC of C code in a second.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bnc509i4426d2@corp.supernews.com>
Rayiner Hashem wrote:
>> It was enough to invalidate your point.
> 
> No it isn't. Interactive compilation is a design constraint that
> filters through the whole system, regardless of whether in any
> particular benchmark you manage to inline most of the functions.

What else is slowed down by being dynamic besides the cost of the function
calls that are no longer there?

>> Eh? For what code and where is any evidence of this?
> 
> http://citeseer.ist.psu.edu/446305.html

The authors are trying to correlate optimizations with time yet they make no
attempt to objectively quantify time, opting instead to use the rather
dubious assumption that turning off optimizations on a modern compiler
provides results representative of compiler technology 36 years ago.

> Look, the CMUCL project ended in 1994. Want to dig up a copy of Visual
> C++ 1.5 and test it against that?

That would be much more interesting. Just for fun (32-bit):

                    -O0     -O3
g++ v2.95 (2001): 10.435s  4.991s
g++ v4.2  (2007): 15.317s  4.926s

So the optimized performance has not improved significantly in 6 years but
the unoptimized performance has gotten substantially worse.

>> It means that Java had every chance to perform well but it didn't.
> 
> You're assuming...

I made no assumptions: that was a statement of fact.

> The JVM is working with design constraints the others aren't.

Maybe the next Java will be better designed.

>> So do C# and F#, both of which are significantly faster than Java.
> 
> I haven't seen any benchmarks, so I couldn't say. Microsoft's compiler
> team is really good, though, so I'm not really surprised.

I should probably post results for C# and F# running under .NET and Mono...

>> If you call two orders of magnitude slower compilation "similar".
> 
> That's an engineering issue, not a fundamental weakness of the
> technique.

I'll believe it when I see it.

>> is interesting but of little practical relevance because the advantages
>> of dynamic typing are lost.
> 
> Only if all of your code needs 100% performance. This is rare. For
> most large programs, only 10% is performance-sensitive. If that 10% is
> written in a static style, the compiler can extract good performance,
> and allow the rest to be written in a more dynamic style.

Whole program optimization requires the whole program.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186714802.878133.295790@q3g2000prf.googlegroups.com>
> What else is slowed down by being dynamic besides the cost of the function
> calls that are no longer there?

Potentially, it slows down anything driven by inter-procedural
analysis (unboxing, stack-allocation, type inference, etc).

>                     -O0     -O3
> g++ v2.95 (2001): 10.435s  4.991s
> g++ v4.2  (2007): 15.317s  4.926s
>
> So the optimized performance has not improved significantly in 6 years but
> the unoptimized performance has gotten substantially worse.

Those results are not generally representative. Look at the SPECint
history of GCC at: http://www.suse.de/~aj/SPEC/CINT/sandbox-b/index.html
(overall SPECint results at the very end)

GCC 3.x's SPECint performance increased about 6% from Jul '01 to Jan
'04. That's about 2.4% per year. The performance of GCC 3.x in Jan '04
relative to the performance of GCC 2.95.3 from March '01 is about 8.4%
higher. That's almost 3% per year. Note also these figures track the
performance of mainline GCC at the time when much of the optimizing
effort was being put into tree-ssa (which was merged in early 2004).
So they are, if anything, conservative.

Actually, when you factor in discontinuities (eg: SSA in the early
'90s), Proebsting's 4% per year sounds about right. Certainly, it's
not unusual for Intel or IBM to make double-digit improvements in a
new version of ICC or XLC.

> I made no assumptions: that was a statement of fact.

Let's put this in the original context:

">> 1. The compile times for those languages are all <13% of Java's
run
>> times.
> That doesn't mean anything.

It means that Java had every chance to perform well but it didn't. "

You're saying that Java had every chance to perform well, because it
could've spent a couple of seconds optimizing, just like the other
compilers, and that still would've been a small fraction of its run-
time. The point you miss is that spending even a second optimizing 150
lines of code would kill the JVM's performance at its design point. On
real programs, it has to do KLOCs of code in that time. On my machine,
the Java server VM spends 0.1 seconds JIT'ing your benchmark, while
g++ spends 7-8x as much time.

> I'll believe it when I see it.

It's in the literature, and the basic technology has been used in
production systems, albeit mostly outside the context of Lisp.

> Whole program optimization requires the whole program.

This statement doesn't make any sense as a reply to my comment.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cansa7nv2cqc2@corp.supernews.com>
Rayiner Hashem wrote:
>> What else is slowed down by being dynamic besides the cost of the
>> function calls that are no longer there?
> 
> Potentially, it slows down anything driven by inter-procedural
> analysis (unboxing, stack-allocation, type inference, etc).

The inlining probably removes those problems in this case.

>>                     -O0     -O3
>> g++ v2.95 (2001): 10.435s  4.991s
>> g++ v4.2  (2007): 15.317s  4.926s
>>
>> So the optimized performance has not improved significantly in 6 years
>> but the unoptimized performance has gotten substantially worse.
> 
> Those results are not generally representative.

That counter-example undermines the core assumption in the paper you cited.
Specifically, -O0 is not representative of compiler optimization technology
from 36 years ago.

> Look at the SPECint 
> history of GCC at: http://www.suse.de/~aj/SPEC/CINT/sandbox-b/index.html
> (overall SPECint results at the very end)

This is a vastly better test of Proebsting's hypothesis. However, if you
perform regression on that data to compute the gradient with error, I doubt
it will be significantly non-zero.

> GCC 3.x's SPECint performance increased about 6% from Jul '01 to Jan
> '04. That's about 2.4% per year.

What is the error on your estimate? I suspect >100% error.

> The performance of GCC 3.x in Jan '04 
> relative to the performance of GCC 2.95.3 from March '01 is about 8.4%
> higher. That's almost 3% per year. Note also these figures track the
> performance of mainline GCC at the time when much of the optimizing
> effort was being put into tree-ssa (which was merged in early 2004).
> So they are, if anything, conservative.

There are several problems here:

1. Significance: confidence is low because the proportionate increase is
well within the noise.

2. Extrapolation: this data spans only 3 years but you were discussing
decades of change. A single invention in those particular 3 years would
spoil the results.

3. Inconsistencies: you previously said that SBCL represented the state of
optimization technology when it was first invented (i.e. it has not
improved) but now you're saying that GCC tracks the current state of the
art (i.e. it continually improved).

4. Self-selection: SPEC is the defacto-standard benchmark and GCC was most
likely being optimized specifically for this benchmark, so results drawn
from this benchmark alone are biased and you need to create a new benchmark
if you want to be objective.

> Actually, when you factor in discontinuities (eg: SSA in the early
> '90s), Proebsting's 4% per year sounds about right. Certainly, it's
> not unusual for Intel or IBM to make double-digit improvements in a
> new version of ICC or XLC.

That is a fine alternative hypothesis but I have yet to see anything
resembling objective quantitative evidence supporting that hypothesis with
significant confidence.

> ">> 1. The compile times for those languages are all <13% of Java's
> run
>>> times.
>> That doesn't mean anything.
> 
> It means that Java had every chance to perform well but it didn't. "
> 
> You're saying that Java had every chance to perform well, because it
> could've spent a couple of seconds optimizing,

You are speculating how much more optimization effort is required to get
Java's performance up to par.

> just like the other compilers,

OCaml compiled 5x faster than you imply. SML/NJ was faster still.

> and that still would've been a small fraction of its run-time.

6% of its run-time.

> The point you miss is that spending even a second optimizing 150 
> lines of code would kill the JVM's performance at its design point. On
> real programs, it has to do KLOCs of code in that time.

Speculation about the time required to do a good job optimizing Java
bytecode does not undermine the objective statement of fact that I made.

> On my machine, 
> the Java server VM spends 0.1 seconds JIT'ing your benchmark, while
> g++ spends 7-8x as much time.

My belief is actually that there are two main problems here:

1. Concurrent GC has a grave performance cost for allocation-intensive code
and the current Java compilers make no attempt to offset this (e.g. region
analysis).

2. Java bytecode does not convey enough information to the JIT optimizer
despite the fact that there was a lot of prior work on this (e.g. decent
static type systems).

I think there is a strong argument that these are design flaws in Java that
are responsible for its performance. As an aside, I find it fascinating
that Lispers manage to single out Java for their performance comparisons
because it is one of the few compiled languages to be as slow as Lisp. Java
is the basis of many wild generalizations about static type systems...

>> I'll believe it when I see it.
> 
> It's in the literature, and the basic technology has been used in
> production systems, albeit mostly outside the context of Lisp.

Any references?

>> Whole program optimization requires the whole program.
> 
> This statement doesn't make any sense as a reply to my comment.

You were saying that whole program optimization could be used to optimize
only 10% of the code.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bqnio9914e732@corp.supernews.com>
Rayiner Hashem wrote:
>> What else is slowed down by being dynamic besides the cost of the
>> function calls that are no longer there?
> 
> Potentially, it slows down anything driven by inter-procedural
> analysis (unboxing, stack-allocation, type inference, etc).

The inlining probably removes those problems in this case.

>>                     -O0     -O3
>> g++ v2.95 (2001): 10.435s  4.991s
>> g++ v4.2  (2007): 15.317s  4.926s
>>
>> So the optimized performance has not improved significantly in 6 years
>> but the unoptimized performance has gotten substantially worse.
> 
> Those results are not generally representative.

That counter-example undermines the core assumption in the paper you cited.
Specifically, -O0 is not representative of compiler optimization technology
from 36 years ago.

> Look at the SPECint 
> history of GCC at: http://www.suse.de/~aj/SPEC/CINT/sandbox-b/index.html
> (overall SPECint results at the very end)

This is a vastly better test of Proebsting's hypothesis. However, if you
perform regression on that data to compute the gradient with error, I doubt
it will be significantly non-zero.

> GCC 3.x's SPECint performance increased about 6% from Jul '01 to Jan
> '04. That's about 2.4% per year.

What is the error on your estimate? I suspect >100% error.

> The performance of GCC 3.x in Jan '04 
> relative to the performance of GCC 2.95.3 from March '01 is about 8.4%
> higher. That's almost 3% per year. Note also these figures track the
> performance of mainline GCC at the time when much of the optimizing
> effort was being put into tree-ssa (which was merged in early 2004).
> So they are, if anything, conservative.

There are several problems here:

1. Significance: confidence is low because the proportionate increase is
well within the noise.

2. Extrapolation: this data spans only 3 years but you were discussing
decades of change. A single invention in those particular 3 years would
spoil the results.

3. Inconsistencies: you previously said that SBCL represented the state of
optimization technology when it was first invented (i.e. it has not
improved) but now you're saying that GCC tracks the current state of the
art (i.e. it continually improved).

4. Self-selection: SPEC is the defacto-standard benchmark and GCC was most
likely being optimized specifically for this benchmark, so results drawn
from this benchmark alone are biased and you need to create a new benchmark
if you want to be objective.

> Actually, when you factor in discontinuities (eg: SSA in the early
> '90s), Proebsting's 4% per year sounds about right. Certainly, it's
> not unusual for Intel or IBM to make double-digit improvements in a
> new version of ICC or XLC.

That is a fine alternative hypothesis but I have yet to see anything
resembling objective quantitative evidence supporting that hypothesis with
significant confidence.

> ">> 1. The compile times for those languages are all <13% of Java's
> run
>>> times.
>> That doesn't mean anything.
> 
> It means that Java had every chance to perform well but it didn't. "
> 
> You're saying that Java had every chance to perform well, because it
> could've spent a couple of seconds optimizing,

You are speculating how much more optimization effort is required to get
Java's performance up to par.

> just like the other compilers,

OCaml compiled 5x faster than you imply. SML/NJ was faster still.

> and that still would've been a small fraction of its run-time.

6% of its run-time.

> The point you miss is that spending even a second optimizing 150 
> lines of code would kill the JVM's performance at its design point. On
> real programs, it has to do KLOCs of code in that time.

Speculation about the time required to do a good job optimizing Java
bytecode does not undermine the objective statement of fact that I made.

> On my machine, 
> the Java server VM spends 0.1 seconds JIT'ing your benchmark, while
> g++ spends 7-8x as much time.

My belief is actually that there are two main problems here:

1. Concurrent GC has a grave performance cost for allocation-intensive code
and the current Java compilers make no attempt to offset this (e.g. region
analysis).

2. Java bytecode does not convey enough information to the JIT optimizer
despite the fact that there was a lot of prior work on this (e.g. decent
static type systems).

I think there is a strong argument that these are design flaws in Java that
are responsible for its performance. As an aside, I find it fascinating
that Lispers manage to single out Java for their performance comparisons
because it is one of the few compiled languages to be as slow as Lisp. Java
is the basis of many wild generalizations about static type systems...

>> I'll believe it when I see it.
> 
> It's in the literature, and the basic technology has been used in
> production systems, albeit mostly outside the context of Lisp.

Any references?

>> Whole program optimization requires the whole program.
> 
> This statement doesn't make any sense as a reply to my comment.

You were saying that whole program optimization could be used to optimize
only 10% of the code.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186848574.815773.237290@x35g2000prf.googlegroups.com>
> The inlining probably removes those problems in this case.

It depends on the internals of the compiler. I personally think that
the code generator was the bottleneck in the fastest SBCL case, but
the open-world compiler is definitely why the code had to be declared
as heavily as it was.

> This is a vastly better test of Proebsting's hypothesis. However, if you
> perform regression on that data to compute the gradient with error, I doubt
> it will be significantly non-zero.

I'm not trying to prove Proebsting's law here. I'm not the one with
the webpage which I regularly trot out to make significant claims
about entire genres of languages. Think of me as the heckler at the
conference informally pointing out all the oversights in your
research. You're making the claim, it's your job to convince us that
you did an adequate job of taking into account confounding factors.
Your page doesn't even make an ATTEMPT to do this.

> 3. Inconsistencies: you previously said that SBCL represented the state of
> optimization technology when it was first invented (i.e. it has not
> improved) but now you're saying that GCC tracks the current state of the
> art (i.e. it continually improved).

SBCL has improved, but if you look at the algorithms, it does indeed
reflect the state of optimization technology circa 1990. Moreover GCC
has tons of people working on it, including full-time people at
RedHat, Novell, IBM, AMD, and other large companies. SBCL doesn't. I
will assert, without proof, that you can buy performance by throwing
money and talent at a problem. You may choose not to believe this, but
if you had even a passing familiarity with the engineering world, you
would.

> That is a fine alternative hypothesis but I have yet to see anything
> resembling objective quantitative evidence supporting that hypothesis with
> significant confidence.

I'm not publishing here, just trying to convey to you an understanding
of the field that you seem to lack. Yes, optimization technology has
improved a lot since 1985. Lots of smart people spend a lot of money
keeping commercial compilers up to date with technology, and lots of
PhDs spend large amounts of time making new lists of things for them
to implement. You're welcome to believe that none of it amounts to
anything, and it's all for naught, but I don't think anyone is going
to agree with you.

> You are speculating how much more optimization effort is required to get
> Java's performance up to par.

I'm not speculating. More time to optimize = more performance. That's
just generally the case with NP-complete problems --- the more time
you can afford to throw at a solution, the better your solution will
be.

> 1. Concurrent GC has a grave performance cost for allocation-intensive code

Where did you get this idea? The Java concurrent GC is only used to
collect the old generation, the young generation behaves like a
regular copying collector. The particular algorithm used in the JRE
also doesn't require a reader barrier, which would slow the code down.

> 2. Java bytecode does not convey enough information to the JIT optimizer
> despite the fact that there was a lot of prior work on this (e.g. decent
> static type systems).

I'd really like to see you elaborate on this point...

> Any references?

http://research.microsoft.com/~maf/talks/Dagstuhl99.ppt (summarizes
some of the work)

http://citeseer.ist.psu.edu/202602.html (the basic work on cycle
elimination)

http://citeseer.ist.psu.edu/414978.html (implements 0cfa on 20 KLOC in
5-10 sec)

http://citeseer.ist.psu.edu/heintze01ultrafast.html (implements
Andersen's on 1 MLOC in 1 sec)

> You were saying that whole program optimization could be used to optimize
> only 10% of the code.

No, I said that only 10% of the code could be written in a static
style, which whole-program optimization could generate efficient code
for. The remaining 90% of code could be inherently dynamic, and whole-
program analysis couldn't change that, but it wouldn't be as
performance-critical.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bsbt4127iqeec@corp.supernews.com>
Rayiner Hashem wrote:
>> The inlining probably removes those problems in this case.
> 
> It depends on the internals of the compiler. I personally think that
> the code generator was the bottleneck in the fastest SBCL case, but
> the open-world compiler is definitely why the code had to be declared
> as heavily as it was.

I think that is likely, yes.

>> This is a vastly better test of Proebsting's hypothesis. However, if you
>> perform regression on that data to compute the gradient with error, I
>> doubt it will be significantly non-zero.
> 
> I'm not trying to prove Proebsting's law here. I'm not the one with
> the webpage which I regularly trot out to make significant claims
> about entire genres of languages. Think of me as the heckler at the
> conference informally pointing out all the oversights in your
> research.

Your explanations of why Java and Lisp are so slow do not undermine my
statements.

> You're making the claim... 

I had never even heard of Proebsting's hypothesis.

>> 3. Inconsistencies: you previously said that SBCL represented the state
>> of optimization technology when it was first invented (i.e. it has not
>> improved) but now you're saying that GCC tracks the current state of the
>> art (i.e. it continually improved).
> 
> SBCL has improved, but if you look at the algorithms, it does indeed
> reflect the state of optimization technology circa 1990. Moreover GCC
> has tons of people working on it, including full-time people at
> RedHat, Novell, IBM, AMD, and other large companies. SBCL doesn't. I
> will assert, without proof, that you can buy performance by throwing
> money and talent at a problem. You may choose not to believe this, but
> if you had even a passing familiarity with the engineering world, you
> would.

If that were true you would expect commercial offerings from large companies
to dominate high performance.

>> That is a fine alternative hypothesis but I have yet to see anything
>> resembling objective quantitative evidence supporting that hypothesis
>> with significant confidence.
> 
> I'm not publishing here, just trying to convey to you an understanding
> of the field that you seem to lack. Yes, optimization technology has
> improved a lot since 1985. Lots of smart people spend a lot of money
> keeping commercial compilers up to date with technology, and lots of
> PhDs spend large amounts of time making new lists of things for them
> to implement. You're welcome to believe that none of it amounts to
> anything, and it's all for naught, but I don't think anyone is going
> to agree with you.

This is a simple statistical test, not a question of belief.

>> You are speculating how much more optimization effort is required to get
>> Java's performance up to par.
> 
> I'm not speculating.

You said "Java had every chance to perform well, because it could've spent a
couple of seconds optimizing". Where did "a couple of seconds" come from if
it was not speculation?

> More time to optimize = more performance.

In one language, yes. Between languages, no.

>> 1. Concurrent GC has a grave performance cost for allocation-intensive
>> code
> 
> Where did you get this idea?

See the recent thread in c.l.f on "Concurrent GC: a good thing?" for
example. There has been a lot of work in trying to find efficient ways to
implement concurrent GCs (see the papers by the authors of OCaml, for
example) but nobody has succeeded and, consequently, both Java and .NET are
much slower at allocating.

>> 2. Java bytecode does not convey enough information to the JIT optimizer
>> despite the fact that there was a lot of prior work on this (e.g. decent
>> static type systems).
> 
> I'd really like to see you elaborate on this point...
> 
>> Any references?
> 
> http://research.microsoft.com/~maf/talks/Dagstuhl99.ppt (summarizes
> some of the work)
> 
> http://citeseer.ist.psu.edu/202602.html (the basic work on cycle
> elimination)
> 
> http://citeseer.ist.psu.edu/414978.html (implements 0cfa on 20 KLOC in
> 5-10 sec)
> 
> http://citeseer.ist.psu.edu/heintze01ultrafast.html (implements
> Andersen's on 1 MLOC in 1 sec)

I'll check them out, thanks.

>> You were saying that whole program optimization could be used to optimize
>> only 10% of the code.
> 
> No, I said that only 10% of the code could be written in a static
> style, which whole-program optimization could generate efficient code
> for. The remaining 90% of code could be inherently dynamic, and whole-
> program analysis couldn't change that, but it wouldn't be as
> performance-critical.

I can't see how that could give the performance boost. Unless you run the
whole program optimization on the other 90% as well, you won't have the
static type information that you need in order to compile efficiently.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186873137.602316.253890@d55g2000hsg.googlegroups.com>
> Your explanations of why Java and Lisp are so slow do not undermine my
> statements.

If my statements do not convince you that you are testing
implementation performance more than language performance, there is no
hope for you.

> I had never even heard of Proebsting's hypothesis.

I wouldn't say it's common knowledge, but you'll see it pop up every
now and then. If anything, most people feel that the law is
pessimistic: http://research.microsoft.com/~toddpro/

See also: http://research.microsoft.com/~toddpro/vinod.htm

> If that were true you would expect commercial offerings from large companies
> to dominate high performance.

Only if commercial Lisp vendors are willing to spend relatively large
amounts of money to get that last 50% on low-level numeric codes. I
don't think that sort of work is what's paying their bills.

> This is a simple statistical test, not a question of belief.

In the absence of a substantial amount of data to prove the counter-
claim, going along with the general belief of people in a particular
field is not unreasonable.

> You said "Java had every chance to perform well, because it could've spent a
> couple of seconds optimizing". Where did "a couple of seconds" come from if
> it was not speculation?

Okay, yes, I suppose technically it is speculation. In the same way as
"would sticking a 1000 HP engine into a Corolla make it go faster" is
speculation. I have never actually done this, but I do not think it is
unreasonable to speculate that it is probably the case.

> In one language, yes. Between languages, no.

I'm not sure what you're trying to say here. Are you claiming that it
should be quicker to optimize Java code than C code? 0.1 seconds is
not a lot of time to optimize a benchmark, when GCC takes 7-8x as long
to optimize the same benchmark written in an even lower-level
language.

> See the recent thread in c.l.f on "Concurrent GC: a good thing?" for
> example.

The entire thread seems to have gotten derailed pretty early, but not
before Daniel Wang told you exactly what you needed to know: if your
allocation rate is X MB/sec, it will always be faster on a multi-
processor machine to have two independent processes allocating at X MB/
sec than to have a single process with a concurrent GC allocating at 2
X MB/sec. It's just the nature of concurrent algorithms --- you almost
never see perfectly linear scalability.

However, both of those points are completely irrelevant, as far as I
can tell your benchmark is single-threaded, and the concurrency of the
GC isn't even being tested.

> but nobody has succeeded and, consequently, both Java and .NET are
> much slower at allocating.

The hard part of doing concurrent GC's is getting them to scale, not
getting them to perform well on single-threaded code.

> >> 2. Java bytecode does not convey enough information to the JIT optimizer
> >> despite the fact that there was a lot of prior work on this (e.g. decent
> >> static type systems).
>
> > I'd really like to see you elaborate on this point...

I'd still like to see this. What features of the Java bytecode inhibit
what optimizations?

> I can't see how that could give the performance boost. Unless you run the
> whole program optimization on the other 90% as well, you won't have the
> static type information that you need in order to compile efficiently.

You run the whole program optimization on the whole 100%. On the
inherently dynamic code, it doesn't help, but on the performance-
sensitive code written in a static style, it does. This way you can
flexibly trade-off the advantages of dynamic typing and the
performance of static typing as you need it.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13canumj4c3loc6@corp.supernews.com>
Rayiner Hashem wrote:
>> If that were true you would expect commercial offerings from large
>> companies to dominate high performance.
> 
> Only if commercial Lisp vendors are willing to spend relatively large
> amounts of money to get that last 50% on low-level numeric codes. I
> don't think that sort of work is what's paying their bills.

I was thinking of Java and .NET, both of which have had enormous financial
backing but neither are leaders in performance.

>> In one language, yes. Between languages, no.
> 
> I'm not sure what you're trying to say here. Are you claiming that it
> should be quicker to optimize Java code than C code?

Some languages are easier to optimize than others, so you cannot extrapolate
compile times for one language given compile times for a substantially
different language.

> 0.1 seconds is 
> not a lot of time to optimize a benchmark, when GCC takes 7-8x as long
> to optimize the same benchmark written in an even lower-level
> language.

GCC is dealing with deallocations and a Turing complete type system. So it
is not "lower level" in this context.

>> See the recent thread in c.l.f on "Concurrent GC: a good thing?" for
>> example.
> 
> The entire thread seems to have gotten derailed pretty early, but not
> before Daniel Wang told you exactly what you needed to know: if your
> allocation rate is X MB/sec, it will always be faster on a multi-
> processor machine to have two independent processes allocating at X MB/
> sec than to have a single process with a concurrent GC allocating at 2
> X MB/sec.

Sure. I'd like to know when/if languages with concurrent GC outperform those
without. I expect concurrent GC to be faster when interprocess
communication is high but I would like to quantify as much as possible.

> However, both of those points are completely irrelevant, as far as I
> can tell your benchmark is single-threaded,

I have some multithreaded versions but they are not publically available
yet.

> and the concurrency of the GC isn't even being tested.

No, concurrent GCs run concurrently with the main thread so all programs
have at least two threads.

>> but nobody has succeeded and, consequently, both Java and .NET are
>> much slower at allocating.
> 
> The hard part of doing concurrent GC's is getting them to scale, not
> getting them to perform well on single-threaded code.

Getting them to scale is certainly a hard problem but getting them to handle
rapid allocation appears to be an unsolved problem. This is a serious
problem for functional programming languages and is one of the reasons why
OCaml reverted back to non-concurrent GC.

>> >> 2. Java bytecode does not convey enough information to the JIT
>> >> optimizer despite the fact that there was a lot of prior work on this
>> >> (e.g. decent static type systems).
>>
>> > I'd really like to see you elaborate on this point...
> 
> I'd still like to see this. What features of the Java bytecode inhibit
> what optimizations?

What static type information is available, e.g. parametric polymorphism?

>> I can't see how that could give the performance boost. Unless you run the
>> whole program optimization on the other 90% as well, you won't have the
>> static type information that you need in order to compile efficiently.
> 
> You run the whole program optimization on the whole 100%. On the
> inherently dynamic code, it doesn't help, but on the performance-
> sensitive code written in a static style, it does. This way you can
> flexibly trade-off the advantages of dynamic typing and the
> performance of static typing as you need it.

The whole program optimizations rely on a closed world; anything dynamic
breaks that.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187360392.554293.66700@a39g2000hsc.googlegroups.com>
> I was thinking of Java and .NET, both of which have had enormous financial
> backing but neither are leaders in performance.

Both Java and .NET have constraints that C or ML do not. Most of these
constraints are imposed by the target markets of the languages. Also,
given their target market, their performance requirements are
different. CPU performance is less important, and I/O performance,
multi-CPU scalability, etc, are more important.

Incidentally, the performance of .NET on computationally intensive
benchmarks is actually quite good. You just have to avoid writing code
that conses a lot.

> Some languages are easier to optimize than others, so you cannot extrapolate
> compile times for one language given compile times for a substantially
> different language.

In general, yes, but in this particular case, we're talking about Java
versus C++. Java is not any easier to optimize than C++.

The other aspect of this is that neither Java JITs nor C++ compilers
do extensive language-level optimization. Most of the optimization
happens on a low-level intermediate representation, and much of the
performance comes from those optimizations. At that level, compiler
time is definitely comparable. If you're still skeptical, go post on
comp.compilers about how limited time isn't a factor in Java JITs.

> Sure. I'd like to know when/if languages with concurrent GC outperform those
> without. I expect concurrent GC to be faster when interprocess
> communication is high but I would like to quantify as much as possible.

I understood you incorrectly when you used the term "concurrent". The
way your discussion reads, it seems you're interested in parallelism,
and since the terminology is sometimes used that way, I assumed that's
what you were talking about. Let me clear up the terminology right
now:

Concurrent GC - Ways of running collections concurrently with the
mutator
Parallel GC - Ways of parallelizing the collection to run in multiple
threads

The two are basically orthogonal to each other. A concurrent GC will
not generally increase the parallel throughput of multiple threads
that heavily and symmetrically use the GC. Indeed, it will generally
reduce throughput. However, it will decrease pause times
substantially, and can have secondary effects on throughput in
asymmetric cases (eg: one thread does not allocate, another thread
does). A parallel GC will increase the total throughput of multiple
threads by parallelizing the collection.

Both are orthogonal to your point about separating processes. You can
run multiple threads in the same heap with a regular non-concurrent,
non-parallel GC. You can run a single thread with a concurrent or
parallel GC, though the latter only makes sense on a multi-processor
system.

> No, concurrent GCs run concurrently with the main thread so all programs
> have at least two threads.

This is one way to implement concurrent GCs, though it's not the only
way (see Hosking's ISMM06 paper). I'm not sure which version the
JVM uses. However, it definitely does not use the concurrent GC unless
you explicitly ask for it. And you'd generally only ask for it to
improve pause times, not parallel throughput.

> Getting them to scale is certainly a hard problem but getting them to handle
> rapid allocation appears to be an unsolved problem. This is a serious
> problem for functional programming languages and is one of the reasons why
> OCaml reverted back to non-concurrent GC.

If pause times are not relevant, it doesn't make much sense at all to
use a concurrent GC. You're better-served with a parallel stop-the-
world GC.

> What static type information is available, e.g. parametric polymorphism?

Static type information about primitive types and aggregates thereof
is available, which is the only thing being tested in your benchmark.
Information about parametric types is not available statically, but
HotSpot wouldn't use it even if it was. Once you have a JIT with OSR,
it makes much more sense to do optimistic, run-time specialization of
functions. This drastically cuts down on the code-bloat induced by
statically specializing all polymorphic functions.

Incidentally, I don't believe that the O'Caml compiler uses
information about parametric types to specialize polymorphic
functions. It could, like MLton, but I don't think it does.

> The whole program optimizations rely on a closed world anything dynamic
> breaks that.

Your claim was that you "lose the benefits of dynamic typing" if you
apply WPO. This is not true. You do indeed lose the benefits of
dynamic redefinition, but nothing stops you from suppressing WPO for
development builds, and enabling it for production builds. You lose
the ability to dynamically redefine code in the final program, but if
20 sec versus 30 sec is really make-or-break for you, that's life.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cbq1fen4b644f@corp.supernews.com>
Rayiner Hashem wrote:
>> I was thinking of Java and .NET, both of which have had enormous
>> financial backing but neither are leaders in performance.
> 
> Both Java and .NET have constraints that C or ML do not. Most of these 
> constraints are imposed by the target markets of the languages.

Yes. For example, Windows doesn't support copy-on-write forking, so
threading and a concurrent GC are the only option for concurrent
programming under Windows. Consequently, both Java and .NET support only
threading and concurrent GC, putting them at a considerable disadvantage to
other languages under Linux and Mac OS X.

> Also, 
> given their target market, their performance requirements are
> different. CPU performance is less important, and I/O performance,
> multi-CPU scalability, etc, are more important.

IO performance is interesting. I only discovered this morning that .NET is
500x slower than OCaml at reading marshalled ("serialized") data, which I
found very surprising.

> Incidentally, the performance of .NET on computationally intensive
> benchmarks is actually quite good. You just have to avoid writing code
> that conses a lot.

Well, yes and no. You can probably get within 2x of OCaml for any program
on .NET but it can take a lot of work and you end up Greenspunning a
run-time environment that can handle fast allocation. I believe they could
address this by doing region analysis in .NET.

>> Getting them to scale is certainly a hard problem but getting them to
>> handle rapid allocation appears to be an unsolved problem. This is a
>> serious problem for functional programming languages and is one of the
>> reasons why OCaml reverted back to non-concurrent GC.
> 
> If pause times are not relevant, it doesn't make much sense at all to
> use a concurrent GC. You're better-served with a parallel stop-the-
> world GC.

Quite probably, yes. I'm not sure I have this option under .NET though.

>> What static type information is available, e.g. parametric polymorphism?
> 
> Static type information about primitive types and aggregates thereof
> is available, which is the only thing being tested in your benchmark.
> Information about parametric types is not available statically, but
> HotSpot wouldn't use it even if it was. Once you have a JIT with OSR,
> it makes much more sense to do optimistic, run-time specialization of
> functions. This drastically cuts down on the code-bloat induced by
> statically specializing all polymorphic functions.

Right.

> Incidentally, I don't believe that the O'Caml compiler uses
> information about parametric types to specialize polymorphic
> functions. It could, like MLton, but I don't think it does.

OCaml does not specialize polymorphic functions or inline function arguments
to HOFs, yes. There was an interesting discussion on the OCaml mailing list
recently about optimizations that people would like OCaml to do.
Personally, I would love to see someone in industry pick up the functional
ball and run with it by producing a professional quality FPL, combining all
of the benefits of OCaml, MetaOCaml, Haskell, SML, Lisp, Scheme and so on
along with a modern development environment.

>> The whole program optimizations rely on a closed world anything dynamic
>> breaks that.
> 
> Your claim was that you "lose the benefits of dynamic typing" if you
> apply WPO. This is not true. You do indeed lose the benefits of
> dynamic redefinition, but nothing stops you from suppressing WPO for
> development builds, and enabling it for production builds. You lose
> the ability to dynamically redefine code in the final program, but if
> 20 sec versus 30 sec is really make-or-break for you, that's life.

Yes, that is exactly what I meant. You have to sacrifice something
somewhere.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187376381.032332.274260@57g2000hsv.googlegroups.com>
> Yes. For example, Windows doesn't support copy-on-write forking, so
> threading and a concurrent GC are the only option for concurrent
> programming under Windows. Consequently, both Java and .NET support only
> threading and concurrent GC, putting them at a considerable disadvantage to
> other languages under Linux and Mac OS X.

What the heck are you talking about?

> Well, yes and no. You can probably get within 2x of OCaml for any program
> on .NET but it can take a lot of work and you end up Greenspunning a
> run-time environment that can handle fast allocation. I believe they could
> address this by doing region analysis in .NET.

.NET can probably do much better than O'Caml on benchmarks that don't
cons a lot. And where do you get the idea that improving the allocator
is "Greenspunning"???

> Quite probably, yes. I'm not sure I have this option under .NET though.

There is a command line option to disable the concurrent GC, though I
don't know what exactly it is that you're expecting to find.

If the .NET GC doesn't handle fast allocation as well as the O'Caml
GC, it's because it's not tuned for functional code. There are some
basic GC design trade-offs involved here...

Sure, you have to sacrifice something somewhere*, but you don't, as
you claim, have to give up all the benefits of dynamic typing.

*) Incidentally, you could also keep dynamic redefinition and dynamic
typing, and trade-off implementation complexity (and development $$$)
by incrementally maintaining the whole-program analysis and
dynamically recompiling code affected by a redefinition.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cbse3e47j7i54@corp.supernews.com>
Rayiner Hashem wrote:
>> Yes. For example, Windows doesn't support copy-on-write forking, so
>> threading and a concurrent GC are the only option for concurrent
>> programming under Windows. Consequently, both Java and .NET support only
>> threading and concurrent GC, putting them at a considerable disadvantage
>> to other languages under Linux and Mac OS X.
> 
> What the heck are you talking about?

Unix.fork.

>> Well, yes and no. You can probably get within 2x of OCaml for any program
>> on .NET but it can take a lot of work and you end up Greenspunning a
>> run-time environment that can handle fast allocation. I believe they
>> could address this by doing region analysis in .NET.
> 
> .NET can probably do much better than O'Caml on benchmarks that don't
> cons a lot.

Nothing is "much better than OCaml" in terms of performance.

> And where do you get the idea that improving the allocator
> is "Greenspunning"???  

Whether or not you call writing your own allocator Greenspunning, you've got
a lot of work to get similar performance on those platforms for many real
applications.

>> Quite probably, yes. I'm not sure I have this option under .NET though.
> 
> There is a command line option to disable the concurrent GC, though I
> don't know what exactly it is that you're expecting to find.

OCaml-like performance.

> If the .NET GC doesn't handle fast allocation as well as the O'Caml
> GC, it's because it's not tuned for functional code.

I'm not sure that .NET can be tuned for functional code.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187378952.163755.45880@d55g2000hsg.googlegroups.com>
> Unix.fork.

First, this is wrong (Windows supports COW just fine), and second,
where do you get the idea that concurrent GC is the only alternative
to copy-on-write forking?

> Nothing is "much better than OCaml" in terms of performance.

This is bull.

> Whether or not you call writing your own allocator Greenspunning,

What the heck?

> you've got
> a lot of work to get similar performance on those platforms for many real
> applications.

I'd like to see how the O'Caml GC performs on heavily imperative
business applications.

> OCaml-like performance.
>

I don't think you have the slightest idea how all this GC stuff fits
together. I'm not even sure at this point what you're trying to get at.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cc42043arts68@corp.supernews.com>
Rayiner Hashem wrote:
>> Unix.fork.
> 
> First, this is wrong (Windows supports COW just fine),

  http://cygwin.com/ml/cygwin/2002-04/msg01072.html

>> Nothing is "much better than OCaml" in terms of performance.
> 
> This is bull.

Evidence?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187395765.008102.66700@r29g2000hsg.googlegroups.com>
> > First, this is wrong (Windows supports COW just fine),
>
>  http://cygwin.com/ml/cygwin/2002-04/msg01072.html

NT's POSIX layer does indeed support fork. Windows Services for UNIX
supports it too.

And what makes you think fork() is in any way related to
concurrent programming? What's wrong with good-old CreateProcess()?

Basic facts: you can implement multi-process concurrency on any system
with IPC and multiple processes. Almost all OSs qualify here. You can
implement single-process, multi-threaded concurrency with any style of
GC.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cdhh7ght7k9b6@corp.supernews.com>
Rayiner Hashem wrote:
> And what makes you think fork() is in any way related to
> concurrent programming?

Because it forks a process that runs concurrently. Look at the designs of
JoCaml and OCamlp3l, for example.

> What's wrong with good-old CreateProcess()? 

That makes it a lot harder to share large immutable data structures
efficiently. Forking with copy-on-write automates it.

> Basic facts: you can implement multi-process concurrency on any system
> with IPC and multiple processes. Almost all OSs qualify here. You can
> implement single-process, multi-threaded concurrency with any style of
> GC.

Sure, but your threads won't run concurrently on separate cores, CPUs or
machines. Forking with message passing is a simple way to exploit
parallelism from languages like OCaml under Linux and Mac OS X.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187454498.629498.131440@22g2000hsm.googlegroups.com>
> Because it forks a process that runs concurrently. Look at the designs of
> JoCaml and OCamlp3l, for example.

Obviously. But why fork() and not any of a number of other mechanisms
for creating processes?

> That makes it a lot harder to share large immutable data structures
> efficiently. Forking with copy-on-write automates it.

You can achieve the same effect by using an anonymous memory-mapped
area for the heap, then copy-on-write mapping it in the new process.

> Sure, but you're threads won't run concurrently on separate cores, CPUs or
> machines.

The OS will happily run multiple threads concurrently regardless of
the type of GC you have. Obviously the GC has to be thread safe, but
this is accomplished as easily as putting a mutex around the alloc
routine. A more reasonable implementation is to give each thread a
local allocation buffer, so the mutex only comes into play when the
allocation buffer fills up. This is how most multithreaded Lisp
systems do things.

Concurrent and parallel GCs come into play only when a GC is
happening. In a non-concurrent collector, all threads must be stopped
during a GC (or an increment of GC work). In a non-parallel collector,
the marking/copying/sweeping can only happen in a single thread.
Neither of these issues are going to really affect concurrency much on
a 2-4 core system, unless you spend excessive amounts of time in GC.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cev5hlvjnjv2a@corp.supernews.com>
Rayiner Hashem wrote:
>> Because it forks a process that runs concurrently. Look at the designs of
>> JoCaml and OCamlp3l, for example.
> 
> Obviously. But why fork() and not any of a number of other mechanisms
> for creating processes?

Because it already does what you need.

>> That makes it a lot harder to share large immutable data structures
>> efficiently. Forking with copy-on-write automates it.
> 
> You can achieve the same effect by using an anonymous memory-mapped
> area for the heap, then copy-on-write mapping it in the new process.

And somehow disable collection of the shared memory in the GCs of all
processes. And setup interprocess communication. Or you could just use
fork.

>> Sure, but your threads won't run concurrently on separate cores, CPUs
>> or machines.
> 
> The OS will happily run multiple threads concurrently regardless of
> the type of GC you have. Obviously the GC has to be thread safe, but
> this is accomplished as easily as putting a mutex around the alloc
> routine. A more reasonable implementation is to give each thread a
> local allocation buffer, so the mutex only comes into play when the
> allocation buffer fills up. This is how most multithreaded Lisp
> systems do things.
> 
> Concurrent and parallel GCs come into play only when a GC is
> happening. In a non-concurrent collector, all threads must be stopped
> during a GC (or an increment of GC work). In a non-parallel collector,
> the marking/copying/sweeping can only happen in a single thread.
> Neither of these issues are going to really affect concurrency much on
> a 2-4 core system,

On my dual core system, allocation-intensive programs are typically 3-5x
slower in Java and .NET than in OCaml.

> unless you spend excessive amounts of time in GC. 

Which is exactly what functional programming does.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187559769.107025.237350@g4g2000hsf.googlegroups.com>
> The shared memory will be collected with fork but will leak with your
> approach unless you do something about it.

You just said a few posts ago that you wanted to disable collecting
the shared memory area.* Do you or don't you? You can accomplish
either effect within the shared memory approach, just by structuring
the runtime appropriately.

*) In theory, if the pre-fork state really is completely immutable,
and you GC right before the fork, all of that state will remain live
throughout the process. Any data live at the fork must be live because
it is pointed to by some pointer that exists before the fork. If all
pre-fork data is immutable, then none of this data can die after the
fork.

> I need to predict the performance of concurrent programs in each language
> and these are the approaches to concurrency used in each language.

BS. Neither O'Caml nor Java/.NET have built-in concurrency models.
They punt to the OS to provide concurrency. You can do multi-process
concurrency just as easily in Java/.NET as you can in O'Caml, and if
O'Caml can't do single-process concurrency well, it's just an
implementation limitation.

And your 3-5x figure "penalty" for concurrent GCs is bogus. You're
failing to sufficiently account for all of the variables in your
"experiment".
From: Markus E L
Subject: fork() and A call for rational discussion. Was: shootout: [...]
Date: 
Message-ID: <n4lkc7i1jx.fsf_-_@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> The shared memory will be collected with fork but will leak with your
>> approach unless you do something about it.
>
> You just said a few posts ago that you wanted to disable collecting
> the shared memory area.* Do you or don't you? You can accomplish
> either effect within the shared memory approach, 

> just by structuring the runtime appropriately.
  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Exactly: By changing the runtime. The fork()-approach to parallelism
on the other side is very cheap because it can be implemented on top
of an existing language as long as fork is available. It has
drawbacks.

> *) In theory, if the pre-fork state really is completely immutable,
> and you GC right before the fork, all of that state will remain live

That obviously is not how parallelism by fork() works.

> throughout the process. Any data live at the fork must be live because
> it is pointed to by some pointer that exists before the fork. If all
> pre-fork data is immutable, then none of this data can die after the
> fork.

In a related post nearby you scoffed 

> You should really learn the subject matter a bit better before making
> these sorts of sweeping generalizations.

My impression is that the same applies to you: Do us a favour and (a)
look up the differences between fork() and CreateProcess() and (b) try
to understand what people want to achieve using fork() for achieving
parallelism (I already gave you a reference: The apache project which
has been using this approach extensively in apache 1.x days has (had?)
a number of documents on these issues). 

Actually I think we don't need another
this-approach-against-that-approach-war even if Jon gets suckered into
opposition against you (which is not so difficult given the amount of
not-quite-truth mixed into your posts). 

What we rather need are people not advocating one approach but
furthering understanding of the specific issues in each approach
(cost, feasibility, limits). Again I'm wondering what you're even
trying to argue: That Jon is wrong? In whatever topic you can find?
I'm not interested. But if you can formulate a position WRT to
something actually on topic in c.l.f or c.l.l (like as I said Pros and
Cons of various approaches to certain problems) and succeed to argue
towards proving or supporting your position then you're welcome.

Understand me right: As with Andy Freeman your problem, to me, seems
to be that you argue for arguings sake, not any more towards a certain
aim like proving/supporting a certain hypothesis you formulated. You
don't have any aim anymore. This is, I'm tempted to say, rather
unscientific :-) -- since you don't hold a view that could be
expressed one or two paragraphs, nobody can actually disprove
you. Argument then deteriorates into a style where people throw
interesting but only marginally related bits of trivia into the
discussion and end their argument with "so there!", but actually don't
close the argument back to their original hypothesis. Since the trivia
are often more or less right, it's difficult to oppose their "argument"
(after all the argument is not wrong, only doesn't have any
consequence WRT to the original contested question or statement).

All this reminds me of the Chewbacca defense
http://en.wikipedia.org/wiki/Chewbacca_Defense. The arguments some
people here (you included) advanced, are like this: Certainly,
"Chewbacca lives on Endor". You said "so there". The only available
and useful response actually is "so what?". Because one either doesn't
see this at the beginning or it seems just to stupid to let it stay at
that, one responds, tries to put the discussion back to the original
topic and as a result the discussion drags on interminably (still
under the heading of "shootout: implementing an interpreter for a
simple procedural language Minim"). The simple fact that none of you
ever changed the subject speaks volumes in itself.

So my final appeal is: People, if you really do want to _solve_ any
disagreement in this discussion (or others), try to establish every
now and then what the topic is and what the point of view you're
advancing is and how your arguments relate to that. Everything else is
pure muddleheadedness.

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cidbuh2h2akf5@corp.supernews.com>
Rayiner Hashem wrote:
>> The shared memory will be collected with fork but will leak with your
>> approach unless you do something about it.
> 
> You just said a few posts ago that you wanted to disable collecting
> the shared memory area.* Do you or don't you?

I want you to explain exactly how you will be collecting unused shared data
in your fork reimplementation for Windows.

> You can accomplish 
> either effect within the shared memory approach, just by structuring
> the runtime appropriately.

Think about what "restructuring the runtime" actually entails. Can you see
why this is prohibitively difficult compared to fork under Unix (a dozen
lines of code)?

> *) In theory, if the pre-fork state really is completely immutable,
> and you GC right before the fork, all of that state will remain live
> throughout the process. Any data live at the fork must be live because
> it is pointed to by some pointer that exists before the fork. If all
> pre-fork data is immutable, then none of this data can die after the
> fork.

No, fork copies the process so both the original and the clone are free to
collect data.

>> I need to predict the performance of concurrent programs in each language
>> and these are the approaches to concurrency used in each language.
> 
> BS. Neither O'Caml nor Java/.NET have built-in concurrency models.

Read and learn:

  http://www.albahari.com/threading/

> They punt to the OS to provide concurrency. You can do multi-process 
> concurrency just as easily in Java/.NET as you can in O'Caml, and if
> O'Caml can't do single-process concurrency well, it's just an
> implementation limitation.

No, it's a trade-off: single-process concurrency is also a limitation in
terms of performance.

> And your 3-5x figure "penalty" for concurrent GCs is bogus. You're
> failing to sufficiently account for all of the variables in your
> "experiment".

No, the maximum of a function is a constant.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187618686.731094.271000@r29g2000hsg.googlegroups.com>
> I want you to explain exactly how you will be collecting unused shared data
> in your fork reimplementation for Windows.

Say you have a process P0 that forks into P1 and P2.

- In P0, use CreateFileMapping to allocate a named, zero-fill region
- Use this as the heap for P0
- Fork P0 into P1 and P2
- In P1 and P2, use OpenFileMapping with FILE_MAP_WRITE
- In P1 and P2, use MapViewOfFile with FILE_MAP_COPY
- Use the mapped area as the process heap in P1 and P2
- Collect this mapped area using GC as you would normally

Changes to the shared heap in any of the processes will not be
reflected in any other process.

> Think about what "restructuring the runtime" actually entails. Can you see
> why this is prohibitively difficult compared to fork under Unix (a dozen
> lines of code)?

If your runtime isn't already geared for UNIX, it's not prohibitively
difficult at all. It's maybe a few dozen lines of code, courtesy of
Windows's ungodly verbosity.

> No, fork copies the process so both the original and the clone are free to
> collect data.

Obviously. My point is that if your inherited state is completely
immutable, and you GC right before the fork, none of the shared data
can ever die. You're not going to hit this in practice, because some
of the inherited state is indeed mutable (thread stacks and
registers), but for many applications it might be a decent
approximation.

Now that I think of it, you almost certainly _don't_ want to GC the
shared data if you can get away with it. Most conventional moving
collectors will quickly force a write-allocate of every live page the
first time it runs. The only way copy-on-write'ing the shared data
would help is if most of the shared data was dead right before the
fork, which can easily be avoided just by GC'ing before the fork.

>  http://www.albahari.com/threading/

What does that prove? That C# has library routines to access the
native thread package? Almost every language in existence has an
implementation that allows you to access the native threads package.
That's not a built-in concurrency model. A built-in concurrency model
is like what's in Erlang or Occam.

> No, the maximum of a function is a constant.

I'm not even going to try to figure out what that means. The point is
that your experiments are full of poor methodology. Your test
samples differ in multiple dimensions, your tests stress multiple
dimensions, and you fail to account for these facts in your
conclusions. Oh the irony, that someone on the theory side presents
"evidence" in the form of improperly-conducted experiments!
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cjg4kb9hsp184@corp.supernews.com>
Rayiner Hashem wrote:
> If your runtime isn't already geared for UNIX, it's not prohibitively
> difficult at all.

Let me know when you've finished reinventing fork (and pattern matching,
rewriting...).

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187628528.406387.227590@r29g2000hsg.googlegroups.com>
On Aug 20, 12:14 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> Rayiner Hashem wrote:
> > If your runtime isn't already geared for UNIX, it's not prohibitively
> > difficult at all.
>
> Let me know when you've finished reinventing fork (and pattern matching,
> rewriting...).

Get out of the "fork" mindset. You don't want fork, you want a way to
inherit a memory area copy-on-write. That's _exactly_ what the file
mapping mechanism in the NT kernel is supposed to be used for.

"It doesn't work exactly the way I'm used to, wah, wah, wah" is not an
legitimate argument.
From: Markus E L
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <q0r6lqcqk3.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

> "It doesn't work exactly the way I'm used to, wah, wah, wah" is not an
> legitimate argument.

Is "I don't want to modify the language run time?". As I understand
the fork()-trick it's all about getting some kind of concurrency with
a high "initial" bandwith of communication to the subsidiary thread
(at fork time)[1] without having to modify the language runtime,
libraries and compiler[2]. [1] is something you don't get with
starting new programs (which is what CreateProcess does AFAIR) and [2]
is something which you don't get if you use any explicit file or
memory mapping mechanisms.

This is all not about something really profound and
fundamental. Basically it's about a trick, a very neat trick. And a
trick that doesn't work efficiently on Windows.  I think from that
point on it's useless to argue either, that "it works on windows too"
or "the trick is useless, because you can instead ...". It doesn't
work efficiently on windows and the requirement is that one doesn't
want to use another method (see [2]). 

My impression is that your opposition is partially fueled by a
conviction that this somehow is a slander on Windows. I don't think
so: It's just recognition of the fact that Win32 and POSIX are two
different worlds and that a mapping between them is only partially
possible. If it helps: Windows had efficient kernel space threads
earlier than most of the common Unix platforms AFAIR (and Unix people
were convinced that they don't need threads so much because they have
parallelism by fork() -- so one thing is actually a substitute for the
other, as afar as concepts go, but cannot be mapped on each other in
the the sense of a drop in replacement or implementing fork() over
threads or threads over fork()).

Regards  -- Markus


PS: Do you read on c.l.f -- if yes, I'd prefer the thread to continue
    there. If at all.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13d5kgpm9mjcse0@corp.supernews.com>
Markus E L wrote:
> Rayiner Hashem wrote:
>> "It doesn't work exactly the way I'm used to, wah, wah, wah" is not an
>> legitimate argument.
> 
> Is "I don't want to modify the language run time?". As I understand
> the fork()-trick it's all about getting some kind of concurrency with
> a high "initial" bandwith of communication to the subsidiary thread
> (at fork time)[1] without having to modify the language runtime,
> libraries and compiler[2]. [1] is something you don't get with
> starting new programs (which is what CreateProcess does AFAIR) and [2]
> is something which you don't get if you use any explicit file or
> memory mapping mechanisms.

To quantify this:

Fork: 1cs
Spawn thread: 1ms
Lock: 1us

Java and .NET are ~5x slower at allocation-intensive programs than OCaml.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187560597.439212.303530@w3g2000hsg.googlegroups.com>
> Because fork very cheaply _copies_ an already existing process. If
> you want many pseudo-threads of the same type (e.g. worker threads for
> answering http requests concurrently) you just initialize a master
> copy (a process) up to a certain point and then fork as many threads
> as you like (often under control of a housekeeper process which
> decides how many threads are needed and when). This is much cheaper
> than creating a new process which would have to go through all the
> setup activity of language runtime, reading config files and so on,
> again.

If the overhead is substantial, keep a pool of worker processes around
and farm out work to them. Most high-performance server designs don't
use a large number of threads anyway --- they use only as many
processes/threads as there are CPUs on the machine, and use non-
blocking I/O (select(), kqueue, I/O completion ports) within each
one.

> Yes, certainly. It requires one to actually change the language
> runtime a good bit.

Is it Windows's fault that O'Caml's runtime was designed for a
different model?

> ... but this is a problem: Manipulating Mutexes usually is a system
> call (except you can fashion a cheap user space mutex with
> instructions that are guaranteed not to be interrupted) and that is
> expensive.

I'm not seriously suggesting that as a solution. I'm just pointing out
that that's as complicated as you need to go in order to achieve
concurrency in a single-process situation.
From: Markus E L
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <lyk5rrgknx.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> Because fork very cheaply _copies_ an already existing process. If
>> you want many pseudo-threads of the same type (e.g. worker threads for
>> answering http requests concurrently) you just initialize a master
>> copy (a process) up to a certain point and then fork as many threads
>> as you like (often under control of a housekeeper process which
>> decides how many threads are needed and when). This is much cheaper
>> than creating a new process which would have to go through all the
>> setup activity of language runtime, reading config files and so on,
>> again.
>
> If the overhead is substantial, keep a pool of worker processes around
> and farm out work to them. Most high-performance server designs don't

You're aware of the (additional) limitations of this method (which are
added to the limitations coming from the parallelism by separate
processes model)? At a certain point it quasi deteriorates into a
client/compute-server model with hardly anything resembling
parallelism in your program.

> use a large number of threads anyway --- they use only as many
> processes/threads as there are CPUs on the machine, and use non-
> blocking I/O (select(), kqueue, I/O completion ports) within each
> one.

Not quite true, at least historically, in environments which are/were
only single threaded (e.g. POSIX 1 + AFAIR 2).

>> Yes, certainly. It requires one to actually change the language
>> runtime a good bit.
>
> Is it Windows's fault that O'Caml's runtime was designed for a
> different model?

Did anyone imply this? We're not granting brownie points to OSes on
basis of fault here. 

>> ... but this is a problem: Manipulating Mutexes usually is a system
>> call (except you can fashion a cheap user space mutex with
>> instructions that are guaranteed not to be interrupted) and that is
>> expensive.

> I'm not seriously suggesting that as a solution. I'm just pointing out
> that that's as complicated as you need to go in order to achieve
> concurrency in a single-process situation.

Has anybody said it's complicated? It is difficult to get an efficient
solution though.

Regards -- Markus
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xodh4aiys.fsf@ruckus.brouhaha.com>
Jon Harrop <···@ffconsultancy.com> writes:
> Sure, but your threads won't run concurrently on separate cores,
> CPUs or machines.

I think that's just an unfortunate limitation of ocaml--maybe they
will fix it sometime.
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <fa7ae9$pbu$1@aioe.org>
Paul Rubin escreveu:
> Jon Harrop <···@ffconsultancy.com> writes:
>> Sure, but your threads won't run concurrently on separate cores,
>> CPUs or machines.
> 
> I think that's just an unfortunate limitation of ocaml--maybe they
> will fix it sometime.
As soon they figure out a type system for it perhaps? :-)
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cevf95f1sl2b@corp.supernews.com>
Paul Rubin wrote:
> Jon Harrop <···@ffconsultancy.com> writes:
>> Sure, but your threads won't run concurrently on separate cores,
>> CPUs or machines.
> 
> I think that's just an unfortunate limitation of ocaml--maybe they
> will fix it sometime.

Fork already fixed it on Linux and Mac OS X. Assuming Java and .NET are
representative of what can be done, I would rather not incur a 5x slowdown
of all allocation intensive code just to support parallel threads when
parallelism is already available via fork.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xtzqwtlqu.fsf@ruckus.brouhaha.com>
Jon Harrop <···@ffconsultancy.com> writes:
> Fork already fixed it on Linux and Mac OS X. Assuming Java and .NET are
> representative of what can be done, I would rather not incur a 5x slowdown
> of all allocation intensive code just to support parallel threads when
> parallelism is already available via fork.

The stuff I'm doing involves navigating and sometimes consing gigabyte
structures and I really don't want to have separate copies in each
process.
From: Markus E L
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <kwsvrco8j.fsf@hod.lan.m-e-leypold.de>
Paul Rubin wrote:

> Jon Harrop <···@ffconsultancy.com> writes:
>> > The stuff I'm doing involves navigating and sometimes consing
>> > gigabyte structures and I really don't want to have separate
>> > copies in each process.
>> 
>> You don't have separate copies in each process. That's the point: a
>> Unix OS automates the sharing of your data.
>
> I don't understand this at all.  
>
> 1. Say I have some big structure in memory, then fork.  Now there are
> two processes that can both read that structure.  They can both make
> and release pointers to the structure as they run.  Maybe eventually,
> all the pointers are released.  How does the GC know it can free the
> structure?

Both processes have their own copy of the structure, but a "virtual"
copy because of COW.

> 2. I thought Unix fork resulted in copy-on-write.  So what happens if

Yes.

> both processes want to modify the existing structure, or cons new
> structure that the other process can then access?

There is no shared data with fork()-parallelism. This can also be seen
as an advantage (e.g. isolation of processes) in certain application
scenarios.

> Maybe we need some faster type of IPC, like x86 call gates directly
> between user processes, instead of using sockets and system calls.

Everything is possible. But (1) if it is processor dependent it won't
port efficiently to other architectures and (2) AFAIR x86 call gates
are everything but fast :-).

Regards -- Markus
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xsl6d76us.fsf@ruckus.brouhaha.com>
Joachim Durchholz <··@durchholz.org> writes:
> > Say I have some big structure in memory, then fork.  Now there
> > are two processes that can both read that structure.  They can
> > both make and release pointers to the structure as they run.
> > Maybe eventually, all the pointers are released.  How does the GC
> > know it can free the structure?
> 
> GC would run in each process separately. As soon as the released
> memory is being overwritten in one process, it will be copied-on-write.

Err, I'm troubled by this--the idea of GC is to manage the limited
amount of physical ram in the machine--if it weren't limited, we
wouldn't need GC.  And if the per-process GC can't know how much ram
is actually in use then it can't really manage it.  As an extreme
example, say it's a 10GB structure initially living in 100 forked
processes.  Now 99 of them release their pointers to it, so the 99
per-process GC's each think they have 10GB of free ram to allocate
from, even though the whole machine might only have 16GB or something
like that.  This doesn't sound so good.  Ram really has to be a shared
resource like disk space.

> If it it multi-gigabyte and must exist only once, you simply set up a
> process (or thread) that manages that data structure once and for all;
> if it is multi-gigabyte and you need to mutate it separately, nothing
> in the world will help you avoid copying it eventually; if it is
> multi-gigabyte and you don't need mutation, you can place it in a
> separate heap and share it.

Yeah, I think not-much-mutation is the usual case.  But I don't see
how to allocate separate heaps and share them in most high level
languages.

A typical example might be a DB or search engine that gets a lot of
queries that tend to be similar.  A query might involve selecting
several sets of a few million rows each, then intersecting them or
doing some calculations on them.  Then the next query might select the
same sets but do a different operation.  So you want to cache the
intermediate result sets in a way that's visible to all the processes.
What now?  

I haven't actually used any of them but it seems to me that the
traditional threading solutions with locks aren't too terrible for
this type of thing; something like STM seems to handle it quite
cleanly; but share-nothing concurrency or pipes/sockets involves
serializing some enormous sets to pass back and forth needlessly.

> > Maybe we need some faster type of IPC, like x86 call gates directly
> > between user processes, instead of using sockets and system calls.
> 
> Call gates wouldn't be faster. The overhead isn't in calling into
> another process (system calls and sockets can do that just fine), it's
> in synchronization overhead, locks, potential priority inversion, and
> other high-level effects.

Well, the idea is to avoid some serialization and also to avoid
waiting for scheduler time slices.  E.g. you want to make a million
lookups (adaptively--you can't batch them) while some other process is
doing something CPU intensive.  You end up relinquishing the CPU for
an entire timeslice a million times, when you could have done the
whole calculation in just one or so slices.

> (Shared-nothing message passing avoids all these effects quite
> nicely. Erlangistas attribute the overall speed and robustness of
> Erlang programs to this property.)

I wonder how well that holds up with multiple cpu's.  I should read
about Mnesia though, it may very well be better than the postgres
madness that we're currently using even though we're coding in Python.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cl26epun9gt01@corp.supernews.com>
Paul Rubin wrote:
> Err, I'm troubled by this--the idea of GC is to manage the limited
> amount of physical ram in the machine--if it weren't limited, we
> wouldn't need GC.  And if the per-process GC can't know how much ram
> is actually in use then it can't really manage it.  As an extreme
> example, say it's a 10GB structure initially living in 100 forked
> processes.  Now 99 of them release their pointers to it, so the 99
> per-process GC's each think they have 10GB of free ram to allocate
> from, even though the whole machine might only have 16GB or something
> like that.  This doesn't sound so good.  Ram really has to be a shared
> resource like disk space.

Only if your GC assumes that deallocating n bytes makes n bytes available,
which is a flawed assumption under many more conditions than fork (e.g.
virtual memory, virtualized OS).

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xbqd1mlbc.fsf@ruckus.brouhaha.com>
Jon Harrop <···@ffconsultancy.com> writes:
> Only if your GC assumes that deallocating n bytes makes n bytes available,

If it doesn't make any assumptions and operates independently of the
real situation, it's no better off.  If it does know the real
situation, it's no longer share-nothing concurrency, since that
knowledge has to be spread around the different processes.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13clg4vqteo3721@corp.supernews.com>
Paul Rubin wrote:
> Jon Harrop <···@ffconsultancy.com> writes:
>> Only if your GC assumes that deallocating n bytes makes n bytes
>> available,
> 
> If it doesn't make any assumptions and operates independently of the
> real situation, it's no better off.

Absolutely, a GC is bound to make some assumptions but making that one would
be silly.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x7inog84q.fsf@ruckus.brouhaha.com>
Jon Harrop <···@ffconsultancy.com> writes:
> >> Only if your GC assumes that deallocating n bytes makes n bytes
> >> available,
> 
> ... a GC is bound to make some assumptions but making that one would
> be silly.

So we're back to the question of what a real GC should actually do.

Anyway I wonder how many GC's =don't= assume that deallocating n
bytes makes n bytes available.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cm71ct0589k7f@corp.supernews.com>
Paul Rubin wrote:
> Anyway I wonder how many GC's =don't= assume that deallocating n
> bytes makes n bytes available.

I doubt any assume that. There are so many things at play on a modern system
that such assumptions are dubious in a wide variety of circumstances. GCs
often benefit from some notion of memory pressure but many perform quite
well with very little information to go on.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xd4xgbyil.fsf@ruckus.brouhaha.com>
Jon Harrop <···@ffconsultancy.com> writes:
> > Anyway I wonder how many GC's =don't= assume that deallocating n
> > bytes makes n bytes available.
> 
> I doubt any assume that. There are so many things at play on a modern system
> that such assumptions are dubious in a wide variety of circumstances. GCs
> often benefit from some notion of memory pressure but many perform quite
> well with very little information to go on.

In that case, dealing with memory pressure is left up to the OS's
virtual memory system.  Ugh.  
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cmbbgkielr190@corp.supernews.com>
Paul Rubin wrote:
> Jon Harrop <···@ffconsultancy.com> writes:
>> > Anyway I wonder how many GC's =don't= assume that deallocating n
>> > bytes makes n bytes available.
>> 
>> I doubt any assume that. There are so many things at play on a modern
>> system that such assumptions are dubious in a wide variety of
>> circumstances. GCs often benefit from some notion of memory pressure but
>> many perform quite well with very little information to go on.
> 
> In that case, dealing with memory pressure is left up to the OS's
> virtual memory system.  Ugh.

I think there are some strong arguments for putting GC in the OS. I wonder
if .NET will move in that direction...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <fafemf$9e2$1@online.de>
Jon Harrop schrieb:
> I think there are some strong arguments for putting GC in the OS. I wonder
> if .NET will move in that direction...

Difficult to do well. Different execution models can vary wildly with 
their GC requirements.

Examples:

Some hand-allocate. (Unmanaged C.)
Some have a large heap. (Managed C, Java, etc.)
Some put everything in the heap, including the call stack. (Some more 
exotic language implementations.)
Others would need a per-thread/per-process heap (Erlang).

Well, OK, MS never was particularly well-known for trying to achieve the 
best solution for many cases, they usually opted for doing one thing 
reasonably well and neglecting everything else (regularly drawing flame 
from those parties who happened to need some of that "everything else" 
stuff).

Regards,
Jo
From: Daniel C. Wang
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ANWdnXQUZdgyb1bbnZ2dnUVZ_tCrnZ2d@comcast.com>
Joachim Durchholz wrote:
> Jon Harrop schrieb:
>> I think there are some strong arguments for putting GC in the OS. I 
>> wonder
>> if .NET will move in that direction...
> 
> Difficult to do well. Different execution models can vary wildly with 
> their GC requirements.
> {stuff deleted}

I would not be surprised if there were hooks into the virtual memory 
subsystem to avoid needless paging, and other virtual memory tuning for 
the GC, but beyond that support, I think it makes little sense to do 
anything special in the OS.

It's more likely the CLR will become more independent of OS than the 
reverse.
From: Markus E L
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2q3ayczlac.fsf@hod.lan.m-e-leypold.de>
Paul Rubin wrote:

> Jon Harrop <···@ffconsultancy.com> writes:
>> > Anyway I wonder how many GC's =don't= assume that deallocating n
>> > bytes makes n bytes available.
>> 
>> I doubt any assume that. There are so many things at play on a modern system
>> that such assumptions are dubious in a wide variety of circumstances. GCs
>> often benefit from some notion of memory pressure but many perform quite
>> well with very little information to go on.
>
> In that case, dealing with memory pressure is left up to the OS's
> virtual memory system.  Ugh.  

Like the CPU memory is a shared resource: Virtualizing it and
distributing it is a task falling to the OS. The way that happens in
present day OSes is certainly ugly, though: Instead of blocking until
the resource "memory" becomes available, the process gets an error
even if it is not the culprit.

Regards -- Markus
From: Scott Burson
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187798598.269672.99190@x40g2000prg.googlegroups.com>
On Aug 21, 9:57 am, Jon Harrop <····@ffconsultancy.com> wrote:
> Paul Rubin wrote:
> > Anyway I wonder how many GC's =don't= assume that deallocating n
> > bytes makes n bytes available.
>
> I doubt any assume that.

I've never heard of one that doesn't.  What would you have it do
instead?  As far as I am aware, all the collectors I have ever used
depend completely on the virtual memory abstraction; I have never seen
any indication otherwise, and I am no stranger to GC tuning.

> There are so many things at play on a modern system
> that such assumptions are dubious in a wide variety of circumstances.

True, but not generally considered the GC's problem.  If you need more
RAM because you're running more processes, you have little recourse
but to buy more RAM.

-- Scott
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cp11rciqi0p71@corp.supernews.com>
Scott Burson wrote:
> On Aug 21, 9:57 am, Jon Harrop <····@ffconsultancy.com> wrote:
>> Paul Rubin wrote:
>> > Anyway I wonder how many GC's =don't= assume that deallocating n
>> > bytes makes n bytes available.
>>
>> I doubt any assume that.
> 
> I've never heard of one that doesn't.  What would you have it do
> instead?

Request from the OS how much allocable space is available when it needs to
know rather than relying on inference from previous deallocations.

> As far as I am aware, all the collectors I have ever used 
> depend completely on the virtual memory abstraction; I have never seen
> any indication otherwise, and I am no stranger to GC tuning.

You may be interested in this discussion about a research GC that is tied to
the VM manager to improve its paging properties and avoid thrashing:

  http://lambda-the-ultimate.org/node/2391

I believe OCaml under Linux and .NET do not do anything similar given that
my programs all grind to a halt immediately when they run out of physical
RAM.

>> There are so many things at play on a modern system
>> that such assumptions are dubious in a wide variety of circumstances.
> 
> True, but not generally considered the GC's problem.  If you need more
> RAM because you're running more processes, you have little recourse
> but to buy more RAM.

If you're running one process in one OS on one machine then yes. If you're
running several programs in different operating systems on virtualized
machines then there is a huge difference between the logical RAM your
programs can claim at any given instant and the physical RAM you can buy.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Scott Burson
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187933668.385615.8750@l22g2000prc.googlegroups.com>
On Aug 22, 11:34 am, Jon Harrop <····@ffconsultancy.com> wrote:
> Scott Burson wrote:
> > On Aug 21, 9:57 am, Jon Harrop <····@ffconsultancy.com> wrote:
> >> Paul Rubin wrote:
> >> > Anyway I wonder how many GC's =don't= assume that deallocating n
> >> > bytes makes n bytes available.
>
> >> I doubt any assume that.
>
> > I've never heard of one that doesn't.  What would you have it do
> > instead?
>
> Request from the OS how much allocable space is available when it needs to
> know rather than relying on inference from previous deallocations.

Well, it isn't necessarily trivial for the OS to decide how to answer
such a question.  But never mind that.  I agree this could be a useful
line of research.  But the operative point for this discussion is,
I've never heard of any system call in Linux or Solaris by which the
kernel could be asked this question and provide an answer.  So I was
asking the question in a here-and-now practical sense: there isn't
currently a way to do this on any OS with which I am familiar.

> > As far as I am aware, all the collectors I have ever used
> > depend completely on the virtual memory abstraction; I have never seen
> > any indication otherwise, and I am no stranger to GC tuning.
>
> You may be interested in this discussion about a research GC that is tied to
> the VM manager to improve its paging properties and avoid thrashing:
>
>  http://lambda-the-ultimate.org/node/2391

That's great, I hope it works out, but you were speculating that this
is common practice now, which AFAIK -- and I probably would know -- it
is not.

> I believe OCaml under Linux and .NET do not do anything similar given that
> my programs all grind to a halt immediately when they run out of physical
> RAM.

There you are.

-- Scott
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13ct0vgfdm4ume1@corp.supernews.com>
Scott Burson wrote:
> On Aug 22, 11:34 am, Jon Harrop <····@ffconsultancy.com> wrote:
>> Request from the OS how much allocable space is available when it needs
>> to know rather than relying on inference from previous deallocations.
> 
> Well, it isn't necessarily trivial for the OS to decide how to answer
> such a question.  But never mind that.  I agree this could be a useful
> line of research.  But the operative point for this discussion is,
> I've never heard of any system call in Linux or Solaris by which the
> kernel could be asked this question and provide an answer.  So I was
> asking the question in a here-and-now practical sense: there isn't
> currently a way to do this on any OS with which I am familiar.

AFAIK, many GCs use some notion of memory pressure that is calculated in
such a way.

>> You may be interested in this discussion about a research GC that is tied
>> to the VM manager to improve its paging properties and avoid thrashing:
>>
>>  http://lambda-the-ultimate.org/node/2391
> 
> That's great, I hope it works out, but you were speculating that this
> is common practice now, which AFAIK -- and I probably would know -- it
> is not.

Then you misunderstood "research" as "common practice now".

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Scott Burson
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187972219.644405.321060@x35g2000prf.googlegroups.com>
On Aug 23, 11:57 pm, Jon Harrop <····@ffconsultancy.com> wrote:

> Then you misunderstood "research" as "common practice now".

Did I?

> > On Aug 21, 9:57 am, Jon Harrop <····@ffconsultancy.com> wrote:
> >> Paul Rubin wrote:
> >> > Anyway I wonder how many GC's =don't= assume that deallocating n
> >> > bytes makes n bytes available.

> >> I doubt any assume that.

I see no mention of research here.

-- Scott
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cufjvjdc58i80@corp.supernews.com>
Scott Burson wrote:
> ...
> I see no mention of research here.

The context was quoted in full in my last post including the "research" bit.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Scott Burson
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187989821.473146.24670@i13g2000prf.googlegroups.com>
On Aug 24, 1:13 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> Scott Burson wrote:
>> > On Aug 21, 9:57 am, Jon Harrop <····@ffconsultancy.com> wrote:
>> >> Paul Rubin wrote:
>> >> > Anyway I wonder how many GC's =don't= assume that deallocating n
>> >> > bytes makes n bytes available.
>> >> I doubt any assume that.
> > I see no mention of research here.
>
> The context was quoted in full in my last post including the "research" bit.

I guess you have forgotten the context.  I suggest you review the
exchange you were having with Paul on the 20th and 21st.  You were
clearly claiming, with no reference to research, that it was common
practice for collectors to receive and use information about overall
system memory pressure.  In fact, here's another quote of yours:

>Absolutely, a GC is bound to make some assumptions but making >that one would be silly.

You've even agreed that the collector you use the most does make that
assumption.

-- Scott
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13cuq67gdpb3add@corp.supernews.com>
Scott Burson wrote:
> I guess you have forgotten the context.

It certainly got a bit confusing when you started splicing different
conversations together...

> I suggest you review the exchange you were having with Paul on the 20th
> and 21st. 

So you meant to reply to a post that didn't explicitly state "research"?

> You were clearly claiming, with no reference to research, that it was
> common practice for collectors to receive and use information about
> overall system memory pressure. 

Yes.

> In fact, here's another quote of yours: 
> 
> > Absolutely, a GC is bound to make some assumptions but making that one
> > would be silly.

Yes.

> You've even agreed that the collector you use the most does make that
> assumption.

No.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E L
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <tq8x84zlf9.fsf@hod.lan.m-e-leypold.de>
Paul Rubin wrote:

> Jon Harrop <···@ffconsultancy.com> writes:
>> >> Only if your GC assumes that deallocating n bytes makes n bytes
>> >> available,
>> 
>> ... a GC is bound to make some assumptions but making that one would
>> be silly.
>
> So we're back to the question of what a real GC should actually do.
>
> Anyway I wonder how many GC's =don't= assume that deallocating n
> bytes makes n bytes available.

What does 'making ... available' mean in the context of virtual memory?
The only thing one can claim (IMHO) is that the process is not using
those N bytes any more. 

Regards -- Markus
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <fae6l9$5os$1@online.de>
Paul Rubin schrieb:
>> (Shared-nothing message passing avoids all these effects quite
>> nicely. Erlangistas attribute the overall speed and robustness of
>> Erlang programs to this property.)
> 
> I wonder how well that holds up with multiple cpu's. 

No problems with that. In Erlang, each process has its own heap and its 
own GC, so having multiple CPUs doesn't affect the performance of single 
threads.
Message passing might be slower because the message might have to be 
written by one CPU and read by another, forcing the message data lower 
into the cache hierarchy.

 > I should read
> about Mnesia though, it may very well be better than the postgres
> madness that we're currently using even though we're coding in Python.

Mnesia is quite lightweight from what I have read.
It can be because it's optimized for keeping everything in memory. 
Erlang is designed for perpetual processes, so the database need not be 
efficient for those operations that make the data persistent.
Of course, Mnesia also eliminates the impedance mismatch. (The downside 
is that using it with any other language than Erlang would reintroduce 
the mismatch.)

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13clg7athtn7924@corp.supernews.com>
Joachim Durchholz wrote:
> Paul Rubin schrieb:
>>> (Shared-nothing message passing avoids all these effects quite
>>> nicely. Erlangistas attribute the overall speed and robustness of
>>> Erlang programs to this property.)
>> 
>> I wonder how well that holds up with multiple cpu's.
> 
> No problems with that. In Erlang, each process has its own heap and its
> own GC, so having multiple CPUs doesn't affect the performance of single
> threads.

Right. Distributed supercomputers do the same thing but in a harder way. It
is still feasible though.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13ckus48ip0v5fe@corp.supernews.com>
Paul Rubin wrote:
> 1. Say I have some big structure in memory, then fork.  Now there are
> two processes that can both read that structure.

Yes.

> They can both make and release pointers to the structure as they run.

Yes.

> Maybe eventually, all the pointers are released.  How does the GC know it
> can free the structure? 

Firstly, there are two GCs running in two separate processes because the GC
got forked. Secondly, fork is a higher-level construct that this which is
transparent to the process (and its GC), so each GC is unaware of the
other.

> 2. I thought Unix fork resulted in copy-on-write.  So what happens if
> both processes want to modify the existing structure,

Modifications to the structure transparently copy part of it before being
updated, so the other process is unaware.

> or cons new structure that the other process can then access?

Inter-process communication is usually performed using pipes rather than
direct memory access.

> Maybe we need some faster type of IPC, like x86 call gates directly
> between user processes, instead of using sockets and system calls.

There are certainly many ways to emulate this behaviour but they are all far
from trivial. This approach is not only integrated into Unix, it will also
be optimized because it is already ubiquitous.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187560293.500457.245540@k79g2000hse.googlegroups.com>
> Those are living AFAIR in different subsystems. How ever they do it
> (fork() I mean), Win32 doesn't have a viable fork().

If you want fork(), use the POSIX subsystem. If you want Win32, use
CreateProcess(). I don't see what's so hard about this.

> Read up about the way apache 1.3 simulated threading by forking.

Obviously you *can* use fork() to do concurrent programming. What I
was asking is why Harrop took an "if and only if" stance wrt fork and
concurrent programming.

> CreateProcess corresponds to fork/exec. This is by far not as
> efficient as fork() to create subsidiary workers from a master copy
> (again: look up how Apache 1.3 did it).

If you're creating worker-threads often, this might be an issue. For
something like Harrop's raytracer, it isn't. I'm actually not a fan of
Windows's process model myself, but what Harrop basically said was
along the lines of "the JVM/CLR have to use concurrent GC with a 3-5x
performance hit because Windows doesn't have fork()". This is just BS.

> Basic facts: Efficiency matters. More basic facts: fork() provides an
> opportunity to optimize.

If your process-creation overhead is a bottleneck, you should create
processes less often. Using a pool of pre-created worker processes can
allow this, for example.
From: Markus E L
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <wrodh3gkzl.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> Those are living AFAIR in different subsystems. How ever they do it
>> (fork() I mean), Win32 doesn't have aviable fork().
>
> If you want fork(), use the POSIX subsystem. If you want Win32, use
> CreateProcess(). I don't see what's so hard about this.

The hard thing is the note you're striking here. If _you_ want to
understand the difference between fork() and CreateProcess() and why
one would like to use fork() in some cases and why it is so difficult
to implement fork() on top of CreateProcess() and finally why one
would like to stay in the Win32 subsystem (hint: no access to the GUI
or anything living in Win32 from the POSIX subsys) -- for this I'd
like to refer you to the Cygwin project (actually I'm sure almost the
first thing they will do, will be to flame your head off "BTAM").


> Obviously you *can* use fork() to do concurrent programming. What I
> was asking is why Harrop took an "if and only if" stance wrt fork and
> concurrent programming.

Efficiency (for the ~3rd time, Mr Hashem).


>> CreateProcess corresponds to fork/exec. This is by far not as
>> efficient as fork() to create subsidiary workers from a master copy
>> (again: look up how Apache 1.3 did it).

> If you're creating worker-threads often, this might be an issue. For
> something like Harrop's raytracer, it isn't. 

Did we restrict the discussion to Jon Harrops raytracer suddenly? Yes,
of course, then you're right. And only if there is no substantial data
propagation from master to worker slaves (more expensive over sockets,
cheap by fork(), again).

> I'm actually not a fan of Windows's process model myself, but what
> Harrop basically said was along the lines of "the JVM/CLR have to
> use concurrent GC with a 3-5x performance hit because Windows
> doesn't have fork()".

Did he? We could of course discuss endlessly now about the new topic
what JH purportedly "basically said". I'm not interested though,
considering that you don't seem to grasp some of the issues involved
here and very probably misunderstood what has been said only cursorily.

> This is just BS.

Most important thing here is that you're absolutely sure of this.

Enough said.

-- Markus





>
>> Basic facts: Efficiency matters. More basic facts: fork() provides an
>> opportunity to optimize.
>
> If your process-creation overhead is a bottleneck, you should create
> processes less often. Using a pool of pre-created worker processes can
> allow this, for example.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13ciee3ntbadlfa@corp.supernews.com>
Rayiner Hashem wrote:
>> Those are living AFAIR in different subsystems. How ever they do it
>> (fork() I mean), Win32 doesn't have aviable fork().
> 
> If you want fork(), use the POSIX subsystem. If you want Win32, use
> CreateProcess(). I don't see what's so hard about this.

Unix provides both fork and threads for concurrency. Win32 provides threads
and not fork.

>> CreateProcess corresponds to fork/exec. This is by far not as
>> efficient as fork() to create subsidiary workers from a master copy
>> (again: look up how Apache 1.3 did it).
> 
> If you're creating worker-threads often, this might be an issue. For
> something like Harrop's raytracer, it isn't. I'm actually not a fan of
> Windows's process model myself, but what Harrop basically said was
> along the lines of "the JVM/CLR have to use concurrent GC with a 3-5x
> performance hit because Windows doesn't have fork()". This is just BS.

Java and .NET were designed to run under Windows, so they could not rely
upon fork for concurrency and use threads with concurrent GC instead. Their
concurrent GCs impose a severe performance hit on allocation. This
performance hit makes both Java and .NET up to 5x slower than OCaml on
allocation intensive code in practice.

>> Basic facts: Efficiency matters. More basic facts: fork() provides an
>> opportunity to optimize.
> 
> If your process-creation overhead is a bottleneck, you should create
> processes less often. Using a pool of pre-created worker processes can
> allow this, for example.

How is that related to Markus' point?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187618821.262755.235070@w3g2000hsg.googlegroups.com>
> > If your process-creation overhead is a bottleneck, you should create
> > processes less often. Using a pool of pre-created worker processes can
> > allow this, for example.
>
> How is that related to Markus' point?

fork() is a way to make process creation fast. Keeping pools of worker
processes is a way to avoid the need for fast process creation.
From: Markus E L
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <37vebakzis.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> > If your process-creation overhead is a bottleneck, you should create
>> > processes less often. Using a pool of pre-created worker processes can
>> > allow this, for example.
>>
>> How is that related to Markus' point?
>
> fork() is a way to make process creation fast. Keeping pools of worker
> processes is a way to avoid the need for fast process creation.

Not quite: fork() is a way to make process cloning fast. A slight
difference when compared with CreateProcess(). Exec(), which
complements fork() WRT process creation, might not be very fast (in
comparison).

You've got a point regarding the suggestion that a worker pool is
another way to achieve efficiency in slightly different
circumstances. But IMHO you're still missing out on the special properties
of fork(): Keeping a pool of workers is not exactly equivalent to
having fork().

Regards -- Markus
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <Ifrui.21370$j7.383805@news.indigo.ie>
Paul Rubin wrote:

>  Therefore, IMO, "this
> function returns an integer for every test input that I happened to
> think of" is a much less valuable proposition than "this function
> returns an integer for every input as shown by static analysis".

FWIW, I'm not sure you (and some other non-lispers, and by the sounds of
it, certain lispers, though many others will be yawning) are up to
speed on the abilities of the more modern lisp compilers. (N.B. I am
NOT saying the current situation is perfect in lisp, IMHO it might well
be worthwhile to revise/formalise the type system, but static/dynamic
is just when type checking happens, "static typing" and "formal type
system" are not the same thing.)

http://www.sbcl.org/manual/Handling-of-Types.html#Handling-of-Types
http://common-lisp.net/project/cmucl/doc/cmu-user/compiler.html#toc125
(N.B. the CMUCL lisp compiler was named "Python" before the language 
Python ever appeared.  CMUCL's manual is more in-depth than SBCL
and still mostly-applicable to SBCL.)

Here's a trivial example of a type interaction with a current
lisp compiler (SBCL) (barely scratches surface of what they do):

; not quite a datatype... but still, merrily statically checked...
(deftype expr () '(or num add sub))  
(defstruct (num (:constructor num (x))) 
        (x 0 :type integer))
(defstruct (add (:constructor add (x y)))
        (x nil :type expr) (y nil :type expr))
(defstruct (sub (:constructor sub (x y))) 
        (x nil :type expr) (y nil :type expr))

; not necessary for this example to work, just useful
(declaim (freeze-type num add sub))

; declare function types narrowly to 
; gain maximum benefit from static checking abilities 
; of compiler - see CMUCL manual...
(declaim (ftype (function (expr) integer)
          blah bloo)) 

(defun blah (a)  ; warns at compile time.
  (typecase a
        (num 1)
        (add 2)))

(defun bloo (a)  ; doesn't warn
  (typecase a
        (num 1)
        (add 2)
        (sub 3)))


blah warns at compile time (as a practical matter, the warning could be
more helpful, but that's another issue) -  since the typecase is not
exhaustively covering expr, the compiler can't derive that the 
return is always an integer like the ftype asserts.  Rather than 
compilation failing though (unless you tell it to fail on warnings of
course), a check potentially signalling a runtime type error will be
inserted, since any declarations-as-assertions  not  statically proven
to always hold  are dynamically checked under the default policy - i.e.
blah always returns an int or signals an error in a well-defined
manner, it doesn't paint your socks or something. 

Point of interest: if you use etypecase or ctypecase, the compiler 
(perhaps surprisingly) _won't_ warn.  Why?  It kind of makes sense -
etypecase and ctypecase already have well-defined run time error
behaviour, so the function won't be returning in the uncovered cases,
it'll either be erroring out or having some replacement value 
supplied by a handler for a correctable type error condition.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <uc7io74c02.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> Rayiner Hashem wrote:

>>>> Sure, and if those assertions can be handled by a type theory then you
>>>> can write them in that form and have them checked before the program
>>>> runs so you can be sure the "assertion" will never trigger.

>>> That's a big "if". 

>> No, it's the most common case.

> I'm done with unsubstantiated claims. 

So don't make them.

> Provide some evidence of this claim, or drop it.

Rather simple: If you ask somebody what his or her function does, the
first thing he or she says is: "It takes a thingy of type X or
structure Z, with the following invariants and ...".

Example: "Parse URL takes a string which represents an URL and parses
it into protocol, server and path information"

 - "takes a string"           -> "of type X"
 - "which represents an URL"  -> "with the following invariants" [1]

As an aside: Static typing only "tracks", not "calculates",
invariants. I say this so that I'm not accused of having said that [1]
was a part of the type in question.

Now note that almost every interface specification, every specification
of what a function does, starts with "It takes a thingy of type(s)
...".

In a dynamically typed language you'd have to write them as assertions
at the beginning of the function, like

  (assert (typep s 'string))

In a statically typed language that is superfluous. -- QED.

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bhgsv5vfq4708@corp.supernews.com>
Rayiner Hashem wrote:
> I understand that, and types are used in dynamically-typed languages
> for the exact same purposes.

No, they aren't. Most static languages with type inference allow you to
print the inferred type information. You cannot do that in a dynamically
typed language. Look at the "subst" example I gave earlier in this thread.

> It's hard to explain to someone who 
> doesn't believe that dynamically-typed languages have types at all,
> but structure and class abstractions are used in Lisp the same way
> type abstractions are in statically-typed languages.

No, they aren't. Look at the difference integrating pattern matching with a
type system makes, for example.

> Thus, differences in reliability stem from the extent to which testing
> can verify that incorrect types will not appear along some data-flow
> path in the program. Testing cannot prove that incorrect types will
> not appear along some path in some, but it can make that assertion
> with an arbitrarily high degree of confidence.

In theory, yes. In practice, it takes too long to test any non-trivial
program against all possible inputs.

> Dynamic-typing proponents have pointed out that they, in practice, do
> not often encounter type errors that escape their testing frameworks.
> This claim should not be dismissed off-hand, especially because there
> is little empirical evidence to suggest that large systems written in
> Lisp are less reliable than their counterparts written in ML or
> Haskell!

You are assuming that "type" means the same thing in dynamic and static
languages, which it does not.

> There is a possible theoretical justification for this observation.
> Data collected in the context of type inference* research in dynamic
> languages suggests that the vast majority of data flow paths in
> programs, even in highly dynamic languages like Smalltalk and Self,
> are monomorphic. This suggests that if an incorrect data type appears
> along some data flow path in the program, it will do so, with very
> high probability, in all executions that exercise that path. That
> would explain why Lisp programmers observe very few type errors
> escaping their test frameworks.

Again, only for an incomparable definition of "type".

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186526042.736925.221660@o61g2000hsh.googlegroups.com>
> No, they aren't. Most static languages with type inference allow you to
> print the inferred type information. You cannot do that in a dynamically
> typed language. Look at the "subst" example I gave earlier in this thread.

My statement was in the context of the date example. I meant that
types in dynamically-typed languages are used to describe the sorts of
constraints presented in that example.

> No, they aren't. Look at the difference integrating pattern matching with a
> type system makes, for example.

You can integrate pattern matching into the type system in a
dynamically-typed language just fine. Cecil's predicate dispatch
mechanism, for example, subsumes ML-style pattern-matching.

> In theory, yes. In practice, it takes too long to test any non-trivial
> program against all possible inputs.

You don't need to test against all possible inputs in order to be
sufficiently confident that the program will be reliable. Do you think
Boeing tests a wing against all possible airloads, or at all possible
combinations of temperature/pressure/humidity/etc?

> You are assuming that "type" means the same thing in dynamic and static
> languages, which it does not.

My argument isn't predicated on that assumption at all.

Consider the date example. You have a type date, with a constructor
make-date. The only operation on dates is add-dates which is (date,
date) -> date. If make-date ensures that the dates are properly
formatted, and add-dates, given two properly-formatted dates, returns
a properly formatted date, then you can infer that any date that
arises in the typing of the program's expressions will be properly
formatted.

In a dynamic language in the equivalent situation (with a date class
and a closed add-dates method), a similar dynamic guarantee holds.
Specifically, any date that occurs dynamically will be properly
formatted. A dynamic occurrence of a value may or may not be a date,
but any dynamic occurrence of a date will be properly formatted.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bk79f7ahm5i63@corp.supernews.com>
Rayiner Hashem wrote:
>> No, they aren't. Look at the difference integrating pattern matching with
>> a type system makes, for example.
> 
> You can integrate pattern matching into the type system in a
> dynamically-typed language just fine.

Yet there is an enormous discrepancy between the ubiquity of pattern
matching in static and dynamic languages.

> Cecil's predicate dispatch mechanism, for example, subsumes ML-style
> pattern-matching. 

Predicate classes don't appear to support exhaustiveness and redundancy
checking, static type checking, closed sum types, decision tree
optimization, views and most of the other important features like elegant
syntax. In what way does it "subsume" pattern matching?

>> In theory, yes. In practice, it takes too long to test any non-trivial
>> program against all possible inputs.
> 
> You don't need to test against all possible inputs in order to be
> sufficiently confident...

You are comparing unquantifiable things in order to be "sufficiently
confident".

> that the program will be reliable. Do you think 
> Boeing tests a wing against all possible airloads, or at all possible
> combinations of temperature/pressure/humidity/etc?

I know Boeing build wings with benefit of mathematics and proof.

>> You are assuming that "type" means the same thing in dynamic and static
>> languages, which it does not.
> 
> My argument isn't predicated on that assumption at all.
> 
> Consider the date example. You have a type date, with a constructor
> make-date. The only operation on dates is add-dates which is (date,
> date) -> date. If make-date ensures that the dates are properly
> formatted, and add-dates, given two properly-formatted dates, returns
> a properly formatted date, then you can infer that any date that
> arises in the typing of the program's expressions will be properly
> formatted.
>
> In a dynamic language in the equivalent situation (with a date class
> and a closed add-dates method), a similar dynamic guarantee holds.
> Specifically, any date that occurs dynamically will be properly
> formatted. A dynamic occurrence of a value may or may not be a date,
> but any dynamic occurrence of a date will be properly formatted.

Yes. This is not related to static typing.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186620786.751549.147840@22g2000hsm.googlegroups.com>
> Predicate classes don't appear to support exhaustiveness and redundancy
> checking, static type checking, closed sum types, decision tree
> optimization, views and most of the other important features like elegant
> syntax. In what way does it "subsume" pattern matching?

You deconstructed the author's original argument in the five minutes
you skimmed through the paper?

> I know Boeing build wings with benefit of mathematics and proof.

This is the same Boeing that did this test:

http://www.youtube.com/watch?v=6Uo0C01Fwb8

to figure out what the maximum loading of the 777's wings were?

Yes, lot's of math is involved in designing a wing. No, there is
nothing resembling proof involved in the process.
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186225127.286816.136700@l70g2000hse.googlegroups.com>
On 3 Aug., 17:51, Cesar Rabak <·······@yahoo.com.br> wrote:
> Ingo Menger escreveu:> On 3 Aug., 14:01, Sacha <····@address.spam> wrote:
>
> >> Your function will be called from another function, right ?
> >> I guess you'll test this other function too !
>
> > How do you do that, as a library writer, for instance?
>
> You put assertions in your library code, for example.

One important point of a type system is to replace runtime errors by
compile time errors.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <lq8x8qbtri.fsf@hod.lan.m-e-leypold.de>
Ingo Menger wrote:

> On 3 Aug., 17:51, Cesar Rabak <·······@yahoo.com.br> wrote:
>> Ingo Menger escreveu:> On 3 Aug., 14:01, Sacha <····@address.spam> wrote:
>>
>> >> Your function will be called from another function, right ?
>> >> I guess you'll test this other function too !
>>
>> > How do you do that, as a library writer, for instance?
>>
>> You put assertions in your library code, for example.
>
> One important point of a type system is to replace runtime errors by
> compile time errors.

Only partially. Runtime type errors usually stay where data crosses the
borders between type domains, especially if the conversion "external
coordinates" => value of type (like in make_date(...,....,....)) is
not total (and in the general case cannot be).

Regards -- Markus
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <dg643xrnt7.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Jon Harrop escreveu:
> [snipped]
>
>> So when you say "you're going to write a test suite" you are assuming that
>> the test suite would be the same for a dynamic or static program, which is
>> not correct.
>>
>
> This looks like a non sense to me. Test should prove that business
> requirements are met or no.

Not only. White box tests also test the most common execution paths
and border cases (to catch off-by-one errors). In a dynamically typed
language that means you either have to prove that values of a certain
kind cannot reach the tested code (e.g. the code will never have to
handle the empty string or a number) or you'll have to write a test
case for that. The former is something type inference and static
typing is rather good at. Either you have more work or you'll write
larger test cases.

Regards -- Markus
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <jnps25ptci.fsf@hod.lan.m-e-leypold.de>
Olivier Drolet wrote:

> On Aug 2, 4:42 am, Ingo Menger <···········@consultant.com> wrote:
>
>> The point is that one day one has to pay for certain kinds of
>> flexibility ...
>> The need for testing is, of course, the higher the more dynamic the
>> language.
>
> Unit testing should trap all the type-checking errors, as well as all
> the other errors a type-checking system can't verify, right? And best
> practices sugggest you should perform unit testing for any software
> worthy of consideration, right? (Well, maybe not all software, but
> most non-trivial code...) So, if you're going to write a test suite
> anyway, the penalty for using a dynamic language may be quite small,
> don't you think?

I fear that the set of cases you'd have to test would be far bigger:
After all, instead of having the type system's assurance that e.g. a
string will never reach a given piece of code, you now would have to
either write a test for how that piece of code handles strings or prove
(manually instead of leaving it to the type system) that strings never
reach this piece of code.

Regards -- markus
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <50Esi.31902$LI7.1237022@phobos.telenet-ops.be>
Markus E.L. 2 wrote:
> 
> Olivier Drolet wrote:
> 
>> On Aug 2, 4:42 am, Ingo Menger <···········@consultant.com> wrote:
>>
>>> The point is that one day one has to pay for certain kinds of
>>> flexibility ...
>>> The need for testing is, of course, the higher the more dynamic the
>>> language.
>> Unit testing should trap all the type-checking errors, as well as all
>> the other errors a type-checking system can't verify, right? And best
>> practices sugggest you should perform unit testing for any software
>> worthy of consideration, right? (Well, maybe not all software, but
>> most non-trivial code...) So, if you're going to write a test suite
>> anyway, the penalty for using a dynamic language may be quite small,
>> don't you think?
> 
> I fear that the set cases you'd have to test would be far bigger:
> After all insetad of having the type systems assurance that e.g. a
> string will never reach a give piece of code, you know would have to
> either write a test how that piece of code handels strings or prove
> (manually instead of leaving it to the type system) that strings never
> reach the this piece of code.
> 
> Regards -- markus
> 
> 

I don't test such things. Either the behavior of my function is correct 
or it isn't.

You're all making a big deal of such trivialities, I really don't get it =/

Sacha
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2blkcq61ht.fsf@hod.lan.m-e-leypold.de>
Sacha wrote:

> Markus E.L. 2 wrote:
>> Olivier Drolet wrote:
>>
>>> On Aug 2, 4:42 am, Ingo Menger <···········@consultant.com> wrote:
>>>
>>>> The point is that one day one has to pay for certain kinds of
>>>> flexibility ...
>>>> The need for testing is, of course, the higher the more dynamic the
>>>> language.
>>> Unit testing should trap all the type-checking errors, as well as all
>>> the other errors a type-checking system can't verify, right? And best
>>> practices sugggest you should perform unit testing for any software
>>> worthy of consideration, right? (Well, maybe not all software, but
>>> most non-trivial code...) So, if you're going to write a test suite
>>> anyway, the penalty for using a dynamic language may be quite small,
>>> don't you think?
>> I fear that the set cases you'd have to test would be far bigger:
>> After all insetad of having the type systems assurance that e.g. a
>> string will never reach a give piece of code, you know would have to
>> either write a test how that piece of code handels strings or prove
>> (manually instead of leaving it to the type system) that strings never
>> reach the this piece of code.
>> Regards -- markus
>>
>
> I don't test such things. Either the behavior of my function is
> correct or it isn't.

And how do you know?

> You're all making a big deal of such trivialities, I really don't get it =/

Well, I care about program correctness. The first step is to find out
which data is valid as input to a program part and then ensure that
only this kind of data reaches this program part. Me, I know that the
type system is helping me a lot with that task. You say you don't need
it, and you don't test. Now let me ask:

  - How do you state the contract of your procedures/functions?

  - How do you verify that the contract is adhered to at places where
    the function is called?

(I don't say that type systems do all that automatically, but your
answer will probably make a point for me :-).)

Regards -- Markus
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <uWtti.36310$u36.1272950@phobos.telenet-ops.be>
Markus E.L. 2 wrote:
> Sacha wrote:
> 
>> Markus E.L. 2 wrote:
>>> Olivier Drolet wrote:
>>>
>>>> On Aug 2, 4:42 am, Ingo Menger <···········@consultant.com> wrote:
>>>>
>>>>> The point is that one day one has to pay for certain kinds of
>>>>> flexibility ...
>>>>> The need for testing is, of course, the higher the more dynamic the
>>>>> language.
>>>> Unit testing should trap all the type-checking errors, as well as all
>>>> the other errors a type-checking system can't verify, right? And best
>>>> practices sugggest you should perform unit testing for any software
>>>> worthy of consideration, right? (Well, maybe not all software, but
>>>> most non-trivial code...) So, if you're going to write a test suite
>>>> anyway, the penalty for using a dynamic language may be quite small,
>>>> don't you think?
>>> I fear that the set cases you'd have to test would be far bigger:
>>> After all insetad of having the type systems assurance that e.g. a
>>> string will never reach a give piece of code, you know would have to
>>> either write a test how that piece of code handels strings or prove
>>> (manually instead of leaving it to the type system) that strings never
>>> reach the this piece of code.
>>> Regards -- markus
>>>
>> I don't test such things. Either the behavior of my function is
>> correct or it isn't.
> 
> And how do you know?

I test the behavior of each function.

That's the only thing that matters to me. Whatever erroneous type comes 
to it, I don't care, as long as it responds the way it should when 
receiving the parameters it should.

In the end, when all tests are passing, it must be because there are 
no type errors. But really I don't care about such low-level details. 
I'm interested in behavior, nothing else.

Let's be realistic, most development today is no rocket science. Build 
a web site, generate invoices, rate phone calls. Nobody will get hurt by 
a type error in one of my programs.

And as has been said earlier, production sites don't fail because of these.

>> You're all making a big deal of such trivialities, I really don't get it =/
> 
> Well, I care for programm correctness. First step is, to find out
> which data is valid as input to a program part and then ensure that
> only this kind of data reaches this program part.

By factoring you divide the work in such a way, that this is trivial at 
every moment. If it isn't, then you need to think harder at a better 
solution. But yes, I agree that you need to think about what to expect 
and what to respond.

> Me, I know that the
> type system is helping me a lot with that task. You say you don't need
> it, and you don't test. Now let me ask:

I do test, that's my whole point ... I test behavior, not typing.

> 
>   - How do you state the contract of your procedures/functions?
> 
>   - How do you verify that the contract is adhered to at places where
>     the function is called?
> 
> (I don't say that type systems do all that automatically, but your
> answer will probably make a pioint for me :-).
> 
> Regards -- Markus
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8toig$jvo$1@online.de>
Olivier Drolet schrieb:
> On Aug 2, 4:42 am, Ingo Menger <···········@consultant.com> wrote:
> 
>> The point is that one day one has to pay for certain kinds of
>> flexibility ...
>> The need for testing is, of course, the higher the more dynamic the
>> language.
> 
> Unit testing should trap all the type-checking errors, as well as all
> the other errors a type-checking system can't verify, right? And best
> practices sugggest you should perform unit testing for any software
> worthy of consideration, right? (Well, maybe not all software, but
> most non-trivial code...) So, if you're going to write a test suite
> anyway, the penalty for using a dynamic language may be quite small,
> don't you think?

See static types as one kind of unit tests.
In a language with type inference, you don't even have to write the unit 
tests, the compiler will do it for you.
Type inference or not, the compiler will diligently set up any 
conceivable unit test that relates to typing.
It will also not only run the unit tests (which may fail to uncover a 
bug), it will even mathematically prove that the program will never fail 
due to failure to meet one of these unit tests.

In other words, static typing eliminates a whole lot of work writing, 
maintaining and running unit tests.
Seems like a huge win to me.

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b268e8$0$1611$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> In other words, static typing eliminates a whole lot of work writing,
> maintaining and running unit tests.
> Seems like a huge win to me.

Indeed, static typing replaces an infinite number of unit tests.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <z2Esi.31906$%4.1174810@phobos.telenet-ops.be>
Jon Harrop wrote:
> Joachim Durchholz wrote:
>> In other words, static typing eliminates a whole lot of work writing,
>> maintaining and running unit tests.
>> Seems like a huge win to me.
> 
> Indeed, static typing replaces an infinite number of unit tests.
> 

Why the hell should I test for type correctness. As long as behavior is 
correct, that automagically means types are correct.

Seems you don't quite get the spirit behind unit testing. (Or maybe 
that's me)

Sacha
From: Tamas Papp
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87ps24onyc.fsf@pu100877.student.princeton.edu>
Sacha <····@address.spam> writes:

> Jon Harrop wrote:
>> Joachim Durchholz wrote:
>>> In other words, static typing eliminates a whole lot of work writing,
>>> maintaining and running unit tests.
>>> Seems like a huge win to me.
>>
>> Indeed, static typing replaces an infinite number of unit tests.
>>
>
> Why the hell should I test for type correctness. As long as behavior
> is correct, that automagically means types are correct.
>
> Seems you don't quite get the spirit behind unit testing. (Or maybe
> that's me)

So then what is the point of arguing with them, especially on c.l.l?

BTW, both haskell and the frog language have unit testing frameworks,
probably written by poor misguided souls who didn't know that the
language had static typing which eliminates the need for it.

Tamas
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186142670.980488.198600@g4g2000hsf.googlegroups.com>
On 3 Aug., 13:30, Tamas Papp <······@gmail.com> wrote:
> Sacha <····@address.spam> writes:
> > Jon Harrop wrote:
> >> Joachim Durchholz wrote:
> >>> In other words, static typing eliminates a whole lot of work writing,
> >>> maintaining and running unit tests.
> >>> Seems like a huge win to me.
>
> >> Indeed, static typing replaces an infinite number of unit tests.
>
> > Why the hell should I test for type correctness. As long as behavior
> > is correct, that automagically means types are correct.
>
> > Seems you don't quite get the spirit behind unit testing. (Or maybe
> > that's me)
>
> So then what is the point of arguing with them, especially on c.l.l?
>
> BTW, both haskell and the frog language have unit testing frameworks,
> probably written by poor misguided souls who didn't know that the
> language had static typing which eliminates the need for it.

Very funny indeed, but nonsense.
Nobody disputes that a type correct program may still have errors.
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <BLEsi.31975$_z1.923715@phobos.telenet-ops.be>
Tamas Papp wrote:
> Sacha <····@address.spam> writes:
> 
>> Jon Harrop wrote:
>>> Joachim Durchholz wrote:
>>>> In other words, static typing eliminates a whole lot of work writing,
>>>> maintaining and running unit tests.
>>>> Seems like a huge win to me.
>>> Indeed, static typing replaces an infinite number of unit tests.
>>>
>> Why the hell should I test for type correctness. As long as behavior
>> is correct, that automagically means types are correct.
>>
>> Seems you don't quite get the spirit behind unit testing. (Or maybe
>> that's me)
> 
> So then what is the point of arguing with them, especially on c.l.l?
> 
> BTW, both haskell and the frog language have unit testing frameworks,
> probably written by poor misguided souls who didn't know that the
> language had static typing which eliminates the need for it.
> 
> Tamas

You misunderstand me, I'm a lisp user, and use unit testing. I merely 
mean that behavior testing will catch the type errors. There's no point 
in checking specifically for type correctness as this comes along.

Sacha
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186143417.357352.269020@l70g2000hse.googlegroups.com>
On 3 Aug., 13:58, Sacha <····@address.spam> wrote:
> Tamas Papp wrote:
> > Sacha <····@address.spam> writes:
>
> >> Jon Harrop wrote:
> >>> Joachim Durchholz wrote:
> >>>> In other words, static typing eliminates a whole lot of work writing,
> >>>> maintaining and running unit tests.
> >>>> Seems like a huge win to me.
> >>> Indeed, static typing replaces an infinite number of unit tests.
>
> >> Why the hell should I test for type correctness. As long as behavior
> >> is correct, that automagically means types are correct.
>
> >> Seems you don't quite get the spirit behind unit testing. (Or maybe
> >> that's me)
>
> > So then what is the point of arguing with them, especially on c.l.l?
>
> > BTW, both haskell and the frog language have unit testing frameworks,
> > probably written by poor misguided souls who didn't know that the
> > language had static typing which eliminates the need for it.
>
> > Tamas
>
> You misunderstand me, I'm a lisp user, and use unit testing. I merely
> mean that behavior testing will catch the type errors.

No, that's wrong.
You can test that the behaviour is okay, given that some prerequisites
are fulfilled (i.e., right number of arguments of the right type).
But you can not prove with tests that those prerequisites will be true
any time. The type system can prove that *some* prerequisites will be
true. For example:

fac 0 = 1
fac n = n * fac (n-1)

The type system can ensure that fac will always be called with an
integer argument. It may not be able to ensure that this will be a
positive integer. But this is the kind of error that will be found
with behavioral testing. Perhaps, a more powerful type system will be
able to prove such prerequisites one day.
From: Frank Buss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <i0j19fcloxk8.1sm7tv0m7dx7d.dlg@40tude.net>
Ingo Menger wrote:

> fac 0 = 1
> fac n = n * fac (n-1)
> 
> The type system can ensure that fac will always be called with an
> integer argument. It may not be able to ensure that this will be a
> positive integer. But this is the kind of error that will be found
> with behavioral testing. Perhaps, a more powerful type system will be
> able to prove such prerequisites one day.

Do you have some ideas how to implement this? At least in Haskell the type
system does not look very good when it comes to defining number ranges, like
naturals:

http://www.haskell.org/tutorial/moretypes.html

I'm not a Haskell expert, but I think this can lead to runtime errors. Even
Pascal allows better type declarations, e.g.:

TYPE natural = 0..MaxInt;

I think it should be possible to use this to ensure the specified number
range at compile time.

-- 
Frank Buss, ··@frank-buss.de
http://www.frank-buss.de, http://www.it4-systems.de
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xmyx87nsg.fsf@ruckus.brouhaha.com>
Frank Buss <··@frank-buss.de> writes:
> > The type system can ensure that fac will always be called with an
> > integer argument. It may not be able to ensure that this will be a
> > positive integer. But this is the kind of error that will be found
> > with behavioral testing. Perhaps, a more powerful type system will be
> > able to prove such prerequisites one day.
> 
> Do you have some ideas how to implement this? At least in Haskell the type
> system looks not very good when it comes to defining number ranges, like
> naturals:

See the Peano number example at:
    http://en.wikibooks.org/wiki/Haskell/Phantom_types

Haskell's type system is nowhere near as fancy as those of languages
designed for that type of thing.
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186147785.764958.202210@57g2000hsv.googlegroups.com>
On 3 Aug., 14:49, Frank Buss <····@frank-buss.de> wrote:
> Ingo Menger wrote:
> > fac 0 = 1
> > fac n = n * fac (n-1)
>
> > The type system can ensure that fac will always be called with an
> > integer argument. It may not be able to ensure that this will be a
> > positive integer. But this is the kind of error that will be found
> > with behavioral testing. Perhaps, a more powerful type system will be
> > able to prove such prerequisites one day.
>
> Do you have some ideas how to implement this? At least in Haskell the type
> system looks not very good when it comes to defining number ranges, like
> naturals:
>
> http://www.haskell.org/tutorial/moretypes.html

Yes. This approach only raises the runtime error in another place.
Theoretically, you can encode natural numbers as algebraic data type
like
  data Nat = Zero | Succ Nat
and use this for any computations.
But even this might only shift the problem to another place in the
program, namely the function that converts between integer and Nat.

> I'm not a Haskell expert, but I think this can lead to runtime errors.

You're right.

> Even
> Pascal allows better type declarations, e.g.:
>
> TYPE natural = 0..MaxInt;

It's nice to be able to declare something. It's another matter to
enforce it without making the language totally unusable. For example:

var a, b : natural;
a := 5;
b := 7;

Of what type is (a-b), (b-a), (a+b), (a*b), succ(a), pred(a)?
Would the answer change, when natural was defined like this:
  type natural = 0..10;

See the problem?
For instance, we can design it so that succ(a) is of the same type as
a. But exactly that gives rise to another possible runtime error with
subranges, since succ(upperbound) and pred(lowerbound) are undefined.
And so on.

> I think it should be possible to use this to ensure the specified number
> range at compile time.

I doubt this helps much, for the reasons stated. Yet, not all hope is
lost. Perhaps one day we'll have compilers that can automatically
check invariants and prerequisites like that n must be >= 0.
From: Frank Buss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <zrn9wihgxoef$.4frdhe1kyi0t.dlg@40tude.net>
Ingo Menger wrote:

> I doubt this helps much, for the reasons stated. Yet, not all hope is
> lost. Perhaps one day we'll have compilers that can automatically
> check invariants and prerequisites like that n must be >= 0.

I've read some time ago about a language which pretends to do this:

http://en.wikipedia.org/wiki/SPARK_programming_language

I don't know Ada, but maybe it would be easier to implement such concepts
like automatic theorem prover and static program verification in functional
languages like Haskell?

-- 
Frank Buss, ··@frank-buss.de
http://www.frank-buss.de, http://www.it4-systems.de
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186152048.233035.88950@w3g2000hsg.googlegroups.com>
On 3 Aug., 16:05, Frank Buss <····@frank-buss.de> wrote:
> Ingo Menger wrote:
> > I doubt this helps much, for the reasons stated. Yet, not all hope is
> > lost. Perhaps one day we'll have compilers that can automatically
> > check invariants and prerequisites like that n must be >= 0.
>
> I've read some time ago about a language which pretends to do this:
>
> http://en.wikipedia.org/wiki/SPARK_programming_language

I remember having read something about that earlier ...


> I don't know Ada,

I don't either.

> but maybe it would be easier to implement such concepts
> like automatic theorem prover and static program verification in functional
> languages like Haskell?

You bet that ingenious minds are working on this right now!
If you are interested and don't know it yet, you may want to subscribe (or
just look at) http://lambda-the-ultimate.org
Automatic program verification is a hot topic there, as well as other
questions of language implementation and design. I find 2/3 of the
articles posted there quite interesting and enlightening, although
sometimes it's a bit academic.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <mw8x8q8wnc.fsf@hod.lan.m-e-leypold.de>
Frank Buss wrote:

> Ingo Menger wrote:
>
>> I doubt this helps much, for the reasons stated. Yet, not all hope is
>> lost. Perhaps one day we'll have compilers that can automatically
>> check invariants and prerequisites like that n must be >= 0.
>
> I've read some time ago about a language which pretends to do this:
>
> http://en.wikipedia.org/wiki/SPARK_programming_language
>
> I don't know Ada, but maybe it would be easier to implement such concepts
> like automatic theorem prover and static program verification in functional
> languages like Haskell?


To a certain extent. But the application area is different. 

Regards -- Markus
From: =?utf-8?b?R2lzbGUgU8ODwqZsZW5zbWk=?= =?utf-8?b?bmRl?=
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <0n1weinpeg.fsf@apal.ii.uib.no>
Frank Buss <··@frank-buss.de> writes:

> Ingo Menger wrote:
> 
> > I doubt this helps much, for the reasons stated. Yet, not all hope is
> > lost. Perhaps one day we'll have compilers that can automatically
> > check invariants and prerequisites like that n must be >= 0.
> 
> I've read some time ago about a language which pretends to do this:
> 
> http://en.wikipedia.org/wiki/SPARK_programming_language
> 
> I don't know Ada, but maybe it would be easier to implement such concepts
> like automatic theorem prover and static program verification in functional
> languages like Haskell?

Now the Spark language was not meant to experiment with revolutionary new types;
it is in practice a subset of Ada and can be compiled by Ada compilers,
but it has "annotations" that look like comments to an Ada compiler while
carrying meaning for the Spark system, and it enhances the type system to make it
(even more) precise than what you can find in standard Ada. Spark is in
many ways an automation of many of the restrictions and rules that have
been applied in safety-critical embedded programming.

Ada was originally designed to be suitable for safety-critical embedded systems,
like avionics and weapon systems, and was designed with a type system that was to
be as precise as possible, but in a traditional structural, imperative and
object-based manner. Spark is a conservative enhancement of this goal.

While Spark is a good tool for its highly specialised task, it was not
created to improve the state of the art in type systems. In safety critical
real time programming, there is a lot of restrictions. Dynamic allocation
(let alone GC) is not allowed, and recursion is disallowed unless you can
prove that it cannot reach more than a certain depth, and there is a lot
of other restrictions. This is a quite different environment from where
functional programming languages are used.

-- 
Gisle Sælensminde, Phd student, Scientific programmer
Computational biology unit, BCCS, University of Bergen, Norway, 
Email: ·····@cbu.uib.no
The best way to travel is by means of imagination
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ljmyx4k8fr.fsf@hod.lan.m-e-leypold.de>
'gisle AT apal DOT ii DOT uib DOT no (Gisle Sælensminde)' wrote:

> Frank Buss <··@frank-buss.de> writes:
>
>> Ingo Menger wrote:
>> 
>> > I doubt this helps much, for the reasons stated. Yet, not all hope is
>> > lost. Perhaps one day we'll have compilers that can automatically
>> > check invariants and prerequisites like that n must be >= 0.
>> 
>> I've read some time ago about a language which pretends to do this:
>> 
>> http://en.wikipedia.org/wiki/SPARK_programming_language

> Now the spark language was not meant to experiment with revolutionary new types,

What? Who said that SPARK is for "experiment[ing] with revolutionary
new types"? 

People, I'm more and more appalled by ... -- Oh, don't bother.

Regards -- Markus
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f934s2$9l9$1@online.de>
Frank Buss schrieb:
> Ingo Menger wrote:
> 
>> fac 0 = 1
>> fac n = n * fac (n-1)
>>
>> The type system can ensure that fac will always be called with an
>> integer argument. It may not be able to ensure that this will be a
>> positive integer. But this is the kind of error that will be found
>> with behavioral testing. Perhaps, a more powerful type system will be
>> able to prove such prerequisites one day.
> 
> Do you have some ideas how to implement this? At least in Haskell the type
> system looks not very good when it comes to defining number ranges,

You can't have a decidable type system that can handle number ranges, 
because it's undecidable whether the result of a function falls within a 
given range (Goedel's incompleteness theorems need integer arithmetic to 
work, hence static type systems are either undecidable or restricted to 
a strictly weaker domain).

Regards,
Jo
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <i7d4y28wpo.fsf@hod.lan.m-e-leypold.de>
Frank Buss wrote:

> Ingo Menger wrote:
>
>> fac 0 = 1
>> fac n = n * fac (n-1)
>> 
>> The type system can ensure that fac will always be called with an
>> integer argument. It may not be able to ensure that this will be a
>> positive integer. But this is the kind of error that will be found
>> with behavioral testing. Perhaps, a more powerful type system will be
>> able to prove such prerequisites one day.
>
> Do you have some ideas how to implement this? At least in Haskell the type
> system looks not very good when it comes to defining number ranges, like
> naturals:
>
> http://www.haskell.org/tutorial/moretypes.html
>
> I'm not a Haskell expert, but I think this can lead to runtime errors. Even
> Pascal allows better type declarations, e.g.:
>
> TYPE natural = 0..MaxInt;
>
> I think it should be possible to use this to ensure the specified number
> range at compile time.

Actually I think that subrange types are a mistake: they confuse
preconditions and types. There are subtle differences, IMHO: types
basically enable safe and defined execution in the host environment
(interpretation of raw memory as data, interpretation of the
representation of one type as an item of another type, as in C) and give
enough clues to the compiler to optimize performance. Then they also
provide a mechanism for encapsulation (thus enabling tracking of
invariants and hiding representation).

But subrange types are a completely different animal and I think they
have been a historical mistake (actually they have the role of
invariants, pre and postconditions at times but are not flexible
enough).

I think every language should have a sublanguage in which one can
specify preconditions and postconditions. Those are checked
dynamically by default, except where the compiler can prove statically
that they are always valid. This would be orthogonal to the type
system (or better the type system provides a fundament on which to
build).

Regards -- Markus
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <nsk5sa8x1l.fsf@hod.lan.m-e-leypold.de>
Sacha wrote:

> Tamas Papp wrote:
>> Sacha <····@address.spam> writes:
>>
>>> Jon Harrop wrote:
>>>> Joachim Durchholz wrote:
>>>>> In other words, static typing eliminates a whole lot of work writing,
>>>>> maintaining and running unit tests.
>>>>> Seems like a huge win to me.
>>>> Indeed, static typing replaces an infinite number of unit tests.
>>>>
>>> Why the hell should I test for type correctness. As long as behavior
>>> is correct, that automagically means types are correct.
>>>
>>> Seems you don't quite get the spirit behind unit testing. (Or maybe
>>> that's me)
>> So then what is the point of arguing with them, especially on c.l.l?
>> BTW, both haskell and the frog language have unit testing frameworks,
>> probably written by poor misguided souls who didn't know that the
>> language had static typing which eliminates the need for it.
>> Tamas
>
> You misunderstand me, I'm a lisp user, and use unit testing. I merely
> mean that behavior testing will catch the type errors. There's no

You know that testing "will" never "catch errors", regardless of whether
they are type errors or others? Testing strictly only proves that your
program delivers the correct output for a limited and very finite
number of inputs.

> point in checking specifically for type correctness as this comes
> along.


Regards -- Markus
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <4Ytti.36312$tH.1211446@phobos.telenet-ops.be>
Markus E.L. 2 wrote:
> Sacha wrote:
> 
>> Tamas Papp wrote:
>>> Sacha <····@address.spam> writes:
>>>
>>>> Jon Harrop wrote:
>>>>> Joachim Durchholz wrote:
>>>>>> In other words, static typing eliminates a whole lot of work writing,
>>>>>> maintaining and running unit tests.
>>>>>> Seems like a huge win to me.
>>>>> Indeed, static typing replaces an infinite number of unit tests.
>>>>>
>>>> Why the hell should I test for type correctness. As long as behavior
>>>> is correct, that automagically means types are correct.
>>>>
>>>> Seems you don't quite get the spirit behind unit testing. (Or maybe
>>>> that's me)
>>> So then what is the point of arguing with them, especially on c.l.l?
>>> BTW, both haskell and the frog language have unit testing frameworks,
>>> probably written by poor misguided souls who didn't know that the
>>> language had static typing which eliminates the need for it.
>>> Tamas
>> You misunderstand me, I'm a lisp user, and use unit testing. I merely
>> mean that behavior testing will catch the type errors. There's no
> 
> You know that testing "will" never "catch errors" regardsless wether
> they are type errors or others? testing strictly only proves that your
> programs delivers the correct output for a limited and very finite
> number of inputs.

That's all I need, my experience shows that this strategy is successful.

>> point in checking specifically for type correctness as this comes
>> along.
> 
> 
> Regards -- Markus
> 
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <l31wegk70x.fsf@hod.lan.m-e-leypold.de>
Sacha wrote:

> Markus E.L. 2 wrote:
>> Sacha wrote:
>>
>>> Tamas Papp wrote:
>>>> Sacha <····@address.spam> writes:
>>>>
>>>>> Jon Harrop wrote:
>>>>>> Joachim Durchholz wrote:
>>>>>>> In other words, static typing eliminates a whole lot of work writing,
>>>>>>> maintaining and running unit tests.
>>>>>>> Seems like a huge win to me.
>>>>>> Indeed, static typing replaces an infinite number of unit tests.
>>>>>>
>>>>> Why the hell should I test for type correctness. As long as behavior
>>>>> is correct, that automagically means types are correct.
>>>>>
>>>>> Seems you don't quite get the spirit behind unit testing. (Or maybe
>>>>> that's me)
>>>> So then what is the point of arguing with them, especially on c.l.l?
>>>> BTW, both haskell and the frog language have unit testing frameworks,
>>>> probably written by poor misguided souls who didn't know that the
>>>> language had static typing which eliminates the need for it.
>>>> Tamas
>>> You misunderstand me, I'm a lisp user, and use unit testing. I merely
>>> mean that behavior testing will catch the type errors. There's no
>> You know that testing "will" never "catch errors" regardsless wether
>> they are type errors or others? testing strictly only proves that your
>> programs delivers the correct output for a limited and very finite
>> number of inputs.
>
> That's all I need, my experience shows that this strategy is successful.

Fine. Then by all means go on working like this. I mean this honestly:
obviously _you_ don't need a static type system and _perhaps_ won't
even profit from one (clearly two different propositions). You're not
alone: a lot of people have deployed software all around in languages
that lack static typing: Python, Perl, PHP (though if I think about
PHP and the daily full-disclosure I have second thoughts whether PHP is
really a good example for the viability of programming without static
types -- but this only as an aside). But that doesn't put you in the
position to argue against static typing and doesn't deliver an
argument against the benefits other people might reap from static
typing.

We can now continue the discussion of why you won't profit from static
typing, whereas other people do: either you work differently or your
work is restricted to subjects in which the benefits of static typing
can't reach you. I don't dare to speculate. But you might want to
disclose a bit?

Regards -- Markus
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <6D0ui.39400$Ku.1198128@phobos.telenet-ops.be>
Markus E.L. 2 wrote:
> Sacha wrote:
> 
>> Markus E.L. 2 wrote:
>>> Sacha wrote:
>>>
>>>> Tamas Papp wrote:
>>>>> Sacha <····@address.spam> writes:
>>>>>
>>>>>> Jon Harrop wrote:
>>>>>>> Joachim Durchholz wrote:
>>>>>>>> In other words, static typing eliminates a whole lot of work writing,
>>>>>>>> maintaining and running unit tests.
>>>>>>>> Seems like a huge win to me.
>>>>>>> Indeed, static typing replaces an infinite number of unit tests.
>>>>>>>
>>>>>> Why the hell should I test for type correctness. As long as behavior
>>>>>> is correct, that automagically means types are correct.
>>>>>>
>>>>>> Seems you don't quite get the spirit behind unit testing. (Or maybe
>>>>>> that's me)
>>>>> So then what is the point of arguing with them, especially on c.l.l?
>>>>> BTW, both haskell and the frog language have unit testing frameworks,
>>>>> probably written by poor misguided souls who didn't know that the
>>>>> language had static typing which eliminates the need for it.
>>>>> Tamas
>>>> You misunderstand me, I'm a lisp user, and use unit testing. I merely
>>>> mean that behavior testing will catch the type errors. There's no
>>> You know that testing "will" never "catch errors" regardsless wether
>>> they are type errors or others? testing strictly only proves that your
>>> programs delivers the correct output for a limited and very finite
>>> number of inputs.
>> That's all I need, my experience shows that this strategy is successful.
> 
> Fine. Than by all means go on to work like this. I mean this honestly:
> Obviously _you_ don't need a static type system and _perhaps_ won't
> even profit from one (clearly two different proposition). You're not
> alone: A lot of people have deployed software all around in languages
> that lack static typing: Python, Perl, PHP (though if I think about
> PHP and the daily full-disclosure I have second thoughts wether PHP is
> really a goog example for the viability of programm without static
> types -- but this only as an aside). But that doesn't put you in the
> position to argue against static typing and doesn't deliver an
> argument against the benefits other people might reap from static
> typing.
> 
> We can now continue the discussion why you won't profit from static
> typeing, whereas other people do: Either you work different or you
> work restricted to subject in which the benefits of static typing
> can't reach you. I don't dare to speculate. But you might want to
> disclose a bit?
> 
> Regards -- Markus

I never argued against static typing!
I think both paradigms should exist. Static typing belongs to 
performance critical areas in my opinion, and also can be used to 
formulate ideas. I'm not sold on the error detection thing.

Sacha
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f9c0kr$bqh$1@online.de>
Markus E.L. 2 schrieb:
> PHP (though if I think about
> PHP and the daily full-disclosure I have second thoughts wether PHP is
> really a goog example for the viability of programm without static
> types -- but this only as an aside).

Lack of a type system isn't PHP's main source of security problems.
Lack of prepared SQL statement support has been up to a point (this is 
being addressed right now). Lack of proper namespaces is another. Lack 
of conventions you can rely on in the standard libraries is yet another.

Regards,
Jo
From: Daniel C. Wang
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <46B9B33F.8050809@gmail.com>
Joachim Durchholz wrote:
> Markus E.L. 2 schrieb:
>> PHP (though if I think about
>> PHP and the daily full-disclosure I have second thoughts wether PHP is
>> really a goog example for the viability of programm without static
>> types -- but this only as an aside).
> 
> Lack of a type system isn't PHP's main source of security problems.
> Lack of prepared SQL statement support has been up to a point (this is 
> being addressed right now). Lack of proper namespaces is another. Lack 
> of conventions you can rely on in the standard libraries is yet another.
> 
> Regards,
> Jo

Imagine PHP had a subtype of String called UntrustedString and the APIs 
were arranged so that externally created strings were UntrustedStrings 
and the PHP execute and eval only took Strings. So you'd have to write
"scrub: UntrustedString -> String" calls to sanitize your strings.

SQL injection is a simple dataflow problem that can be reformulated as a 
type-problem. It could also be done dynamically with "taint bits", too 
but I think the type based approach is more efficient.
From: David Formosa (aka ? the Platypus)
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <slrnfbn13h.219.dformosa@localhost.localdomain>
["Followup-To:" header set to comp.lang.functional.]
On Wed, 08 Aug 2007 05:12:47 -0700, Daniel C. Wang <·········@gmail.com> wrote:

[...]

> SQL injection is a simple dataflow problem that can be reformulated as a 
> type-problem. It could also be done dynamically with "taint bits", too 
> but I think the type based approach is more efficient.

In most scripting languages the compiler overhead is paid on every
execution.  Running the taint checker at run time means that you don't
have to check code that isn't viewed.

What would be nice is if you could pass a flag to tell the compiler
to do a full dataflow analysis and pick out clear cases of taint
problems.
From: Frank Buss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ecq1om9rcbjc.v9kvwcfark6g.dlg@40tude.net>
Tamas Papp wrote:

> BTW, both haskell and the frog language have unit testing frameworks,
> probably written by poor misguided souls who didn't know that the
> language had static typing which eliminates the need for it.

I don't know what the frog language is, but you'll need unit tests even in
the most sophisticated static typed language, because it doesn't help you
with logical bugs, e.g. you want to calculate all next possible positions
of a pawn in a chess game and you have forgotten to implement en passant,
which a good unit test reveals, but no type system.

-- 
Frank Buss, ··@frank-buss.de
http://www.frank-buss.de, http://www.it4-systems.de
From: Tamas Papp
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87lkcsoln4.fsf@pu100877.student.princeton.edu>
Frank Buss <··@frank-buss.de> writes:

> Tamas Papp wrote:
>
>> BTW, both haskell and the frog language have unit testing frameworks,
>> probably written by poor misguided souls who didn't know that the
>> language had static typing which eliminates the need for it.
>
> I don't know what the frog language is, but you'll need unit tests even in
> the most sophisticated static typed language, because it doesn't help you
> with logical bugs, e.g. you want to calculate all next possible positions
> of a pawn in a chess game and you have forgotten to implement en passant,
> which a good unit test reveals, but no type system.

I thought that the irony in my post was easy to spot.

Tamas
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <kd4pje8wk8.fsf@hod.lan.m-e-leypold.de>
Tamas Papp wrote:

> Frank Buss <··@frank-buss.de> writes:
>
>> Tamas Papp wrote:
>>
>>> BTW, both haskell and the frog language have unit testing frameworks,
>>> probably written by poor misguided souls who didn't know that the
>>> language had static typing which eliminates the need for it.
>>
>> I don't know what the frog language is, but you'll need unit tests even in
>> the most sophisticated static typed language, because it doesn't help you
>> with logical bugs, e.g. you want to calculate all next possible positions
>> of a pawn in a chess game and you have forgotten to implement en passant,
>> which a good unit test reveals, but no type system.
>
> I thought that the irony in my post was easy to spot.

No. Because we had a number of very similar post that were dead serious.

Regards -- Markus
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186153365.969514.26210@k79g2000hse.googlegroups.com>
On Aug 3, 1:30 pm, Tamas Papp <······@gmail.com> wrote:
> Sacha <····@address.spam> writes:
> > Jon Harrop wrote:
> >> Joachim Durchholz wrote:
> >>> In other words, static typing eliminates a whole lot of work writing,
> >>> maintaining and running unit tests.
> >>> Seems like a huge win to me.
>
> >> Indeed, static typing replaces an infinite number of unit tests.
>
> > Why the hell should I test for type correctness. As long as behavior
> > is correct, that automagically means types are correct.
>
> > Seems you don't quite get the spirit behind unit testing. (Or maybe
> > that's me)
>
> So then what is the point of arguing with them, especially on c.l.l?
>
> BTW, both haskell and the frog language have unit testing frameworks,
> probably written by poor misguided souls who didn't know that the
> language had static typing which eliminates the need for it.
>
> Tamas
the frog language LOL That's a good one .
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186152225.886176.268510@b79g2000hse.googlegroups.com>
On Aug 3, 1:15 am, Joachim Durchholz <····@durchholz.org> wrote:
> Olivier Drolet schrieb:
>
> > On Aug 2, 4:42 am, Ingo Menger <···········@consultant.com> wrote:
>
> >> The point is that one day one has to pay for certain kinds of
> >> flexibility ...
> >> The need for testing is, of course, the higher the more dynamic the
> >> language.
>
> > Unit testing should trap all the type-checking errors, as well as all
> > the other errors a type-checking system can't verify, right? And best
> > practices sugggest you should perform unit testing for any software
> > worthy of consideration, right? (Well, maybe not all software, but
> > most non-trivial code...) So, if you're going to write a test suite
> > anyway, the penalty for using a dynamic language may be quite small,
> > don't you think?
>
> See static types as one kind of unit tests.
> In a language with type inference, you don't even have to write the unit
> tests, the compiler will do it for you.
> Type inference or not, the compiler will diligently set up any
> conceivable unit test that relates to typing.
> It will also not only run the unit tests (which may fail to uncover a
> bug), it will even mathematically prove that the program will never fail
> due to failure to meet one of these unit tests.
>
> In other words, static typing eliminates a whole lot of work writing,
> maintaining and running unit tests.
> Seems like a huge win to me.
>
> Regards,
> Jo

(*) Is it possible to compile Haskell code that is broken, like
some functions that aren't defined or are called with wrong params?

(*) I tend to think that static languages force you to keep all the code
right all the time, something that I despise. When a problem is solved
you could write it in any language, but when the problem is unknown I
need a language that allows me to do something like: I don't care if
that function is undefined or called with wrong arguments at this
moment, I'll handle that later, now I have something urgent to do /
try.  I don't know if this is how most people work or at least how lisp
programmers work, but it just fits my personality. I just couldn't
stand to program in a language that wants me to know all the answers
from the start, making promises that meant nothing just to compile the
damn thing. Gee, I'm trying weblocks now and guess what, I didn't load a
fricking cl+ssl module and I'm still able to try the examples. Maybe
it's just my mentality.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186153207.946457.302580@b79g2000hse.googlegroups.com>
> stand to program in a language that wants me to know all the answers
> from the start making promises that meant nothing just to compile the
> damn thing. Gee I'm trying weblocks now and guess what I didn't load a
> fricking cl+ssl module and I'm still able to try the examples. Maybe
> It's just my mentality.

It's not just you. Yesterday, I decided I wanted to change some fairly
fundamental data structures in my program. I made the change, then
went through the client code reconciling it, fixing and testing one
piece at a time. About a third of the way through, I decided the new
data structure still wasn't completely right, so I made another
smaller change and kept propagating. In short order, I had converted
the whole program to use the new data structure.

How would that workflow work in a statically-typed language? Yes,
making the change and fixing the compile errors would have sped up
finding the affected code a little bit, but without the iterative
testing I wouldn't have caught the second oversight until after I'd
(incorrectly) fixed the rest of the program? Less gracious people will
say I need to think more precisely, but in my experience, you often
don't know exactly what you want until you finish building whatever
you're building. Maybe if you build the same thing over and over
again...
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <v24pjgoawi.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

> It's not just you. Yesterday, I decided I wanted to change some fairly
> fundamental data structures in my program. I made the change, then
> went through the client code reconciling it, fixing and testing one
> piece at a time. About a third of the way through, I decided the new
> data structure still wasn't completely right, so I made another
> smaller change and kept propagating. In short order, I had converted
> the whole program to use the new data structure.

> How would that workflow work in a statically-typed language? Yes,

Very well. I'd fix where the type errors occur -- and thus be sure
that I haven't forgotten to "propagate" changes to some place.

Regards -- Markus
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186237709.346218.105930@x35g2000prf.googlegroups.com>
> Very well. I'd fix where the type errors occur -- and thus be sure
> that I haven't forgotten to "propagate" changes to some place.
>
> Regards -- Markus

So the answer is, static typing fails me here? It would've taken me
twice as long to do this particular task in a statically-typed
language?
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9myx6dapr.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> Very well. I'd fix where the type errors occur -- and thus be sure
>> that I haven't forgotten to "propagate" changes to some place.
>>
>> Regards -- Markus
>
> So the answer is, static typing fails me here? It would've taken me
> twice as long to do this particular task in a statically-typed
> language?

1. Failed? No, I don't think the compiler would have broken
   down. Typing would have worked as advertised all the time (meaning:
   Well typed programs would have been translated to executables).

2. More serious: No. Not in the scenarios I met. For three reasons:
(1) In a statically typed scenario "propagating" changes is usually
faster because the compiler checks your propagation and roughly their
correctness. (2) If I don't want to update a complete application I
stage the changes in a testing harness. (3) Changes seldom propagate
farther than up to the next module boundary, so there are not so many
places to change if your application is modular.

Of course you can insist that static typing will fail you in some
scenario you once happened to encounter, and you're certainly free to
conclude that finding one task that _perhaps_ would take _you_ longer
in a statically typed language, that static typing is obstructing the
development process.

But I doubt you can draw this conclusion on this evidence
alone. People who actually work with static typing usually describe
that the typing helps them to make changes in a controlled fashion.

But I cannot disprove your scenario for the simple reason that you
don't give any actual arguments. What you basically say is "I once had
the following experience. What about that?". But what about that? I
change data structures and interfaces all the time in statically typed
languages and don't have a problem with that? How can I convince you
that you wouldn't have a problem? I can't -- for the simple reason
that the experience you're telling us about doesn't have any
significance. To draw any conclusion as you imply -- that statically
typed languages make you slower -- you'd have to suppose

 - the scenario you describe (that changes are aborted are recognized
   as incomplete before they are complete and need to be
   augmented/improved) is fairly typical and occurs fairly often.

 - that in a statically typed language the programmer would not have
   recognized the insufficiency of the change before the changes would
   have been propagated over the whole program (Hint: I suggest, since
   type checking replaces many of the consistency tests people have
   to do in dynamically typed languages, that he would have probably
   found the problem during propagation too, but perhaps not from testing
   but from the type errors or by the simple reason that he's actually
   looking at the code he's changing)

 - that there would not have been any other way to test the changes
   than propagate them through the whole application.

 - that there are no compensating speed ups in other steps of the
   development process (hint: typos are usually caught very quickly)

 - and so on.


So it's hard to actually oppose your hypothesis because you're not
making a complete case. You imply that programming in statically typed
languages is slower. You ask about one specific thing that happened to
you (and actually the description is vague enough that nothing can be
said from it). Those two don't connect. I can't contradict you, just
as I can't contradict a number of other people in this discussion:
Somewhere else Rainer starts with the assertion that Lisp is a
multiparadigm language and that one can do things in Lisp that one
can't do in any other language. A number of postings later we're down
to the argument that Lisp has a "real goto statement" (as opposed to
the somehow not so real tail call of the functional languages). That
seems a bit poor as an argument but since nobody tries actually to
refer back to the original assertion one stays bogged down in
discussion differences between simulating goto with tail calls and a
real goto syntax (actually splitting hairs). 

Your attempt at discussing the topic fails along similar lines: What
is your scenario supposed to tell us? What conclusion do you want to
draw from it? What actually means "static typing fails me here?" Do we
even know enough about your scenario to draw any conclusions? This is
all so fuzzy -- You know I once had a dynamically typed program and it
didn't work either -- so now? Do you see how "I once had ..."
arguments don't work, at least not alone (They might be good
illustrations of something you want to say but are no arguments in
themselves).


Regards -- Markus
From: Stefan Nobis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m2ejihqfym.fsf@snobis.de>
·····································@ANDTHATm-e-leypold.de (Markus
E.L. 2) writes:

> I change data structures and interfaces all the time in statically
> types languages and don't have a problem with that? How can I
> convince you that you wouldn't have a problem?

> Furthermore, if live pathcing becomes a development methodology
> rather than an upgrade mechanism I start to wonder how those people
> do version control and code reviews: It must be really difficult to
> recover the current state of code in the image after patching in all
> changes?

Some very good observations: People and the way they are thinking are
quite different. You have no problems with strong static typing, but
you can't really imagine what working in a typeless or dynamically typed
language is like -- and even if you try I think it would just not work
as well for you.

But also there are other people which are quite the other way round.

It's not very interesting which group is bigger, who found his way to
his language which way or the like. People are too different for any
single language and any single way of development.

And by the way: We all here should unite strength against the
mainstream C# and Java instead of making war against each other. :)

-- 
Stefan.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ffbqdl2ewh.fsf@hod.lan.m-e-leypold.de>
Stefan Nobis wrote:

> ·····································@ANDTHATm-e-leypold.de (Markus
> E.L. 2) writes:
>
>> I change data structures and interfaces all the time in statically
>> types languages and don't have a problem with that? How can I
>> convince you that you wouldn't have a problem?
>
>> Furthermore, if live pathcing becomes a development methodology
>> rather than an upgrade mechanism I start to wonder how those people
>> do version control and code reviews: It must be really difficult to
>> recover the current state of code in the image after patching in all
>> changes?
>
> Some very good observations: People and the way thy are thinking are
> quite different. You have no problems with strong static typing, but
> you can't really imagine how working in a typeless or dynamic typed
> language is like 

Wrong. I can imagine and actually work habitually in a number of
dynamically typed languages. What I'm criticizing in this subthread is
(reread my words) the dynamic patching of systems.

The rest of your contribution doesn't address the concerns I raised:
How one is supposed to do QA and version control when patching systems
live.

Stefan Nobis wrote:

> And by the way: We all here should unite strength against the
> mainstream C# and Java instead of making war against each other. :)

And why is that so? C# has garbage collection and I hear, closures of
some sort. I'm not quite certain yet about where the type system will
leave me. Why should I fight C#?

Regards -- Markus
From: Stefan Nobis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m2abt4r6kt.fsf@snobis.de>
·····································@ANDTHATm-e-leypold.de (Markus
E.L. 2) writes:

> The rest of your contribution doesn't address the concerns I raised:
> How one is supposed to do QA and version control when patching systems
> live.

Hmmm... the dynamic guys should believe you that static system will
catch an enormous amount of errors but you can't believe patching a
live system could be a reasonable and quality assuring development
method...

That's exactly my point: People are quite different, problems are
quite different and so there are very different ways to get work
successfully done.

I'm quite sure in 10 or 20 years there will still be both, static and
dynamic systems and both will be used for big, complex applications
just because people are too different for one single way to develop
programs.

-- 
Stefan.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1yps20hbmp.fsf@hod.lan.m-e-leypold.de>
Stefan Nobis wrote:

> ·····································@ANDTHATm-e-leypold.de (Markus
> E.L. 2) writes:
>
>> The rest of your contribution doesn't address the concerns I raised:
>> How one is supposed to do QA and version control when patching systems
>> live.
>
> Hmmm... the dynamic guys should believe you that static system will
> catch an enormous amount of errors but you can't believe patching a
> live system could be a reasonable and quality assuring development
> method...

Exactly. So we're actually back to: Only hot patch with code tested
elsewhere, after booting a test system completely from a pristine
image and source from version control. Which reintroduces the step
"compiling" through the back door and again leaves us with a long
turnaround (admittedly one can still hot patch and thus, supposedly,
keep the now-and-then reconstructed test system in sync with the
production system: A technology that actually fascinates me from a
software engineering point of view. Still I see a lot of problems
there and the development will then more be done in the classical
style: One will have to put complete source into the repository to
build a system from a pristine image. (And while I can test
dynamically as much as I want, I probably would have to retype my
solution into source files to check it in, or is there a way around
this?).


> That's exactly my point: People are quite different, problems are
> quite different and so there are very different ways to get work
> successfully done.

My impression was that version control and source archiving are a
requirement for responsible software engineering. If a development
method doesn't integrate well with them, it's simply unsuitable.

> I'm quite sure in 10 or 20 years there will still be both, static and
> dynamic systems and both will be used for big, complex applications
> just because people are too different for one single way to develop
> programs.


We will see :-).

Regards -- Markus
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hdk7aF396mmaU1@mid.individual.net>
Ingo Menger wrote:
> On 2 Aug., 10:12, Pascal Costanza <····@p-cos.net> wrote:
>> Ingo Menger wrote:
> 
>>> One can understand type inference as a feature that sorts out programs
>>> that can't possibly run without errors and leaves those that have
>>> errors most probably.
>> No, a static type system that enforces type soundness always has to
>> reject certain programs that may succeed at runtime.
> 
> This is a question of semantics.
> One can define a semantic, where nonsensical actions like passing the
> empty list to a function that extracts the second element from a
> tuple, would  result in some more or less meaningful value.
> An example of a language with such a forgiving semantic is perl.
> Certain nonsensical constructs produce the value undef and the program
> can go on with the undef value.

No, it's more than that.

>> whereas in dynamic languages, preference is given
>> to flexibility, especially with regard to testing and reflective
>> capabilities in a language.
> 
> The point is that one day one has to pay for certain kinds of
> flexibility ...
> The need for testing is, of course, the higher the more dynamic the
> language.

Maybe, maybe not. Largely irrelevant here, though.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186047286.447185.225850@w3g2000hsg.googlegroups.com>
On 2 Aug., 10:50, Pascal Costanza <····@p-cos.net> wrote:
> Ingo Menger wrote:
> > On 2 Aug., 10:12, Pascal Costanza <····@p-cos.net> wrote:
> >> Ingo Menger wrote:
>
> >>> One can understand type inference as a feature that sorts out programs
> >>> that can't possibly run without errors and leaves those that have
> >>> errors most probably.
> >> No, a static type system that enforces type soundness always has to
> >> reject certain programs that may succeed at runtime.
>
> > This is a question of semantics.
> > One can define a semantic, where nonsensical actions like passing the
> > empty list to a function that extracts the second element from a
> > tuple, would  result in some more or less meaningful value.
> > An example of a language with such a forgiving semantic is perl.
> > Certain nonsensical constructs produce the value undef and the program
> > can go on with the undef value.
>
> No, it's more than that.

Yes, it's something like that.
At least it makes no sense to speak of "programs" that "succeed" at
runtime without referring to a semantic.
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hg7qrF3jbgn7U1@mid.individual.net>
Ingo Menger wrote:
> On 2 Aug., 10:50, Pascal Costanza <····@p-cos.net> wrote:
>> Ingo Menger wrote:
>>> On 2 Aug., 10:12, Pascal Costanza <····@p-cos.net> wrote:
>>>> Ingo Menger wrote:
>>>>> One can understand type inference as a feature that sorts out programs
>>>>> that can't possibly run without errors and leaves those that have
>>>>> errors most probably.
>>>> No, a static type system that enforces type soundness always has to
>>>> reject certain programs that may succeed at runtime.
>>> This is a question of semantics.
>>> One can define a semantic, where nonsensical actions like passing the
>>> empty list to a function that extracts the second element from a
>>> tuple, would  result in some more or less meaningful value.
>>> An example of a language with such a forgiving semantic is perl.
>>> Certain nonsensical constructs produce the value undef and the program
>>> can go on with the undef value.
>> No, it's more than that.
> 
> Yes, it's something like that.
> At least it makes no sense to speak of "programs" that "succeed" at
> runtime without referring to a semantic.

All static type systems have to reject programs that may succeed at 
runtime. The assumption of defenders of static type systems is that the 
kinds of programs that may succeed but are nevertheless rejected are 
marginal and uninteresting. The assumption of defenders of dynamic type 
systems is that they are not.

This is independent of what the actual semantics are.

An important case is when programming languages provide reflection at 
runtime, including both introspection and intercession. Especially the 
latter makes it at least extremely difficult to statically type-check 
programs written in such languages. There is an important class of 
programs whose behaviors need to be updated and changed at runtime, and 
such programs rely on reflection at runtime. I am not aware of any 
static type system that is able to check such programs before 
deployment, and I am convinced that they will never be able to do so, so 
I don't bother.

That's my personal conviction and I don't want to argue about personal 
convictions. I am fine if you disagree that such programs are important, 
as long as you are aware that this is mainly a matter of personal 
opinion. I only strongly disagree with defenders of static type systems 
as soon as static typing is portrayed as the objectively only reasonable 
way to go, because that is stupid.

I just want to inform you about the fact that there are people who care 
about the programs that static type systems cannot check and who 
understand what the underlying issues are.



Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186140162.507481.11450@w3g2000hsg.googlegroups.com>
On 3 Aug., 10:37, Pascal Costanza <····@p-cos.net> wrote:
> Ingo Menger wrote:

> > Yes, it's something like that.
> > At least it makes no sense to speak of "programs" that "succeed" at
> > runtime without referring to a semantic.
>

> That's my personal conviction and I don't want to argue about personal
> convictions. I am fine if you disagree that such programs are important,
> as long as you are aware that this is mainly a matter of personal
> opinion.

I don't disagree.
You know, there is ongoing research in type systems, etc.
This is just because "we" feel, that our current type systems could be
even more powerful.
But this is also admitting that todays strongly typed languages do not
cover everything.

> I only strongly disagree with defenders of static type systems
> as soon as static typing is portrayed as the objectively only reasonable
> way to go, because that is stupid.

Depends on what you mean with "way to go".
Is it the practical question of a project team in 2007, what kind of
language to use for the ohh-so-important-hot-upgradable-yet-another-
web-app X? Then I agree with you.
Or is it the prospect of the future of programming languages? My
personal opinion here is that we will have untyped languages for toy
scripts, as it used to be. And for the rest, where dollars or human
lifes count, we'll have languages with even more advanced type
systems. The time is not so far away when we will regard an
ArrayIndexOutOfBoundsException a typing error, just as we today may
regard a NullPointerException a typing error (or rather a misdesign in
the language (i.e. java) that allows one to confuse values of type T
and Maybe T (in haskell parlance))

> I just want to inform you about the fact that there are people who care
> about the programs that static type systems cannot check and who
> understand what the underlying issues are.

I respect that, because I can imagine that such programs really exist.

But when I hear statements like "the type system disturbs me in my
usual code-some-idea-and-test-it cycle", or the like then I have the
impression that certain people can't see how development in a strongly
typed language is like. It's like those FORTRAN IV programmers that
refuse(d*) to adapt to new languages because there was no FORMAT
statement, no computed GOTO and no COMMON block with EQUIVALENCE
clauses.

*Meanwhile, they will all be retired, I guess.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186152145.943083.25580@q75g2000hsh.googlegroups.com>
> scripts, as it used to be. And for the rest, where dollars or human
> lifes count, we'll have languages with even more advanced type
> systems. The time is not so far away when we will regard an
> ArrayIndexOutOfBoundsException a typing error

There is no free lunch. A program is a specification of behavior. A
type model is a specification of constraints on that behavior. Any
type-model sophisticated enough to fully constrain the behavior of a
program will be no different from a programming language, and hence
just as susceptible to error. Your "sufficiently smart type system"
will need unit tests to ensure the correctness of your type models.

On that note, one fact that seems conspicuously missing is that
macros make it trivial to define static constraints on the usage of
code. Consider, for example, a "with-resource" construct that has the
constraint that a client should only be allowed to use a small set of
"safe" functions while holding that resource. It is possible to
express this in a useful way with Lisp macros. How do you express this
in the Haskell type system?
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186154632.855482.7320@r34g2000hsd.googlegroups.com>
On 3 Aug., 16:42, Rayiner Hashem <·······@gmail.com> wrote:
> > scripts, as it used to be. And for the rest, where dollars or human
> > lifes count, we'll have languages with even more advanced type
> > systems. The time is not so far away when we will regard an
> > ArrayIndexOutOfBoundsException a typing error
>
> There is no free lunch. A program is a specification of behavior. A
> type model is a specification of constraints on that behavior. Any
> type-model sophisticated enough to fully constrain the behavior of a
> program will be no different from a programming language, and hence
> just as susceptible to error.

The point is just to make programming languages so that code written
in them is *less* susceptible. You won't deny that since the days
where we keyed in octal codes on the control panel, some progress has
been made in that matter. There is no reason to believe that we can't
go further.

> Your "sufficiently smart type system"
> will need unit tests to ensure the correctness of your type models.

No.
It's just that the need for tests will never be eliminated.


> On that note, one fact that seems conspiculously missing is that
> macros make it trivial to define static constraints on the usage of
> code. Consider, for example, a "with-resource" construct that has the
> constraint that a client should only be allowed to use a small set of
> "safe" functions while holding that resource. It is possible to
> express this in a useful way with Lisp macros. How do you express this
> in the Haskell type system?

I'm not quite sure what this would be good for. But, alas, no excuses!
I am not saying that the haskell type system is capable of expressing
any semantic constraint. In another subthread here we find, for
example, that it's not that easy to make sure through the type system
that a function gets a non-negative number without introducing
possibilities for runtime errors elsewhere.
But there's no doubt that type systems and related tools for automatic
program verification will evolve so as to prevent the very possibility
of ever more runtime errors.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186162295.120311.276110@d55g2000hsg.googlegroups.com>
> The point is just to make programming languages so that code written
> in them is *less* susceptible. You won't deny that since the days
> where we keyed in octal codes on the control panel, some progress has
> been made in that matter. There is no reason to believe that we can't
> go further.

You have to account for all the possible sources of error in the
program. A static compiler with a perfectly-expressive type system can
guarantee that code conforms to type models expressed in that type
system. It can make no guarantees about whether that type model
correctly expresses the actual desired behavior of the program.
Indeed, in the limiting case, the type model becomes a precise
specification of the behavior of the program and the code becomes
redundant. There is no evidence to suggest that such a type model will
be any less susceptible to error than the code that would have
implemented the equivalent behavior. It's easier, of course, to
analyze the type model of a Haskell program than to analyze regular
Haskell code, but that's only because the type model is so much more
limited in what it can express. There is no guarantee that a
sufficiently-expressive type system will retain this ease of analysis,
and good reason to believe that it won't.

> I'm not quite sure what this would be good for.

This sort of constraint arises all the time. Consider something like
an interrupt routine or signal handler, where only a small set of
functions are safe to call from within the handler.

There are, indeed, tons of domain-specific constraints that can be
easily expressed in Lisp macros that cannot be easily expressed in
most type systems. A macro for specifying instruction patterns in an
assembler can check to make sure certain machine conventions are
obeyed (eg: on x86, you can have a mem source, or a mem dest, but not
both). A macro for arranging filters into an audio-processing pipeline
can signal an error if a maximum latency is exceeded. This can all be
done at compile time, just like a sufficiently expressive type system.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x8x8sv6lo.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
> There is no free lunch. A program is a specification of behavior. A
> type model is a specification of constraints on that behavior. Any
> type-model sophisticated enough to fully constrain the behavior of a
> program will be no different from a programming language, and hence
> just as susceptible to error. Your "sufficiently smart type system"
> will need unit tests to ensure the correctness of your type models.

By that logic, sufficiently thorough unit tests will need unit tests
of their own as well.  It never stops.

Your argument seems to be that static types can't eliminate 100% of
program errors, and as long as they can eliminate only 99.999% then
they're useless.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186166329.283049.178160@57g2000hsv.googlegroups.com>
> By that logic, sufficiently thorough unit tests will need unit tests
> of their own as well.  It never stops.

Absolutely. You can't prove an implementation correct, no matter how
you go about it.

> Your argument seems to be that static types can't eliminate 100% of
> program errors, and as long as they can eliminate only 99.999% then
> they're useless.

What I'm challenging is the unfounded assumption that a sufficiently-
expressive type system would be able to eliminate 99.999% of errors
(or indeed, even any more errors at all than careful unit-testing!)
The fundamental point you're missing is that in any type system in
which the compiler can check everything (or nearly everything), a type
model expressed in that type system becomes a complete (or nearly
complete) specification of the behavior of the program. What is the
basis of your claim that a behavioral specification expressed as a
type model is any less susceptible to error than a behavioral
specification expressed as code? You can't extrapolate from the ease
of proofs in existing, unexpressive type systems, because the ease of
analysis of those systems rests on the fact that they cannot express
general program behaviors.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <s1ir7uadjc.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> By that logic, sufficiently thorough unit tests will need unit tests
>> of their own as well.  It never stops.
>
> Absolutely. You can't prove an implementation correct, no matter how
> you go about it.
>
>> Your argument seems to be that static types can't eliminate 100% of
>> program errors, and as long as they can eliminate only 99.999% then
>> they're useless.
>
> What I'm challenging is the unfounded assumption that a sufficiently-
> expressive type system would be able to eliminate 99.999% of errors

That we agree. Probably it's a lower number, but it has been suggested
it's significantly more than 10% and I'd agree with that.

> (or indeed, even any more errors at all than careful unit-testing!)

And that statement shows that you don't have the required
experience. Sorry: Apart from the work to build such a
unit-testing library (which needs to be more extensive if you don't
have static typing), unit-testing covers other areas and will never
replace type systems (and, where this is needed, program verification)

> The fundamental point you're missing is that in any type system in
> which the compiler can check everything (or nearly everything), a type
> model expressed in that type system becomes a complete (or nearly
> complete) specification of the behavior of the program. 

The point is that nobody actually asserted this. May I say "straw man
argument" here?

> What is the basis of your claim that a behavioral specification
> expressed as a type model is any less susceptible to error than a
> behavioral specification expressed as code?

Nobody said that AFAIR. Not all errors are behavioral errors. Indeed
most are not.

> You can't extrapolate from the ease of proofs in existing,
> unexpressive type systems, because the ease of analysis of those
> systems rests on the fact that they cannot express general program
> behaviors.

And so? No proponent of static typing here aimed at expressing
"general program behaviors". The assertion alone is incredibly
stupid. I'm tempted to suggest that you study up a bit on type theory,
it's purpose and topics. I really can recommend the tutorial papers of
Luca Cardelli.

Regards -- Markus
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-6935DA.20071003082007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Rayiner Hashem <·······@gmail.com> writes:
> > There is no free lunch. A program is a specification of behavior. A
> > type model is a specification of constraints on that behavior. Any
> > type-model sophisticated enough to fully constrain the behavior of a
> > program will be no different from a programming language, and hence
> > just as susceptible to error. Your "sufficiently smart type system"
> > will need unit tests to ensure the correctness of your type models.
> 
> By that logic, sufficiently thorough unit tests will need unit tests
> of their own as well.  It never stops.
> 
> Your argument seems to be that static types can't eliminate 100% of
> program errors, and as long as they can eliminate only 99.999% then
> they're useless.

How about something between 1% to 10% of program errors?

-- 
http://lispm.dyndns.org
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f90cls$f62$2@aioe.org>
Rainer Joswig escreveu:
> In article <··············@ruckus.brouhaha.com>,
>  Paul Rubin <·············@NOSPAM.invalid> wrote:
> 
>> Rayiner Hashem <·······@gmail.com> writes:
>>> There is no free lunch. A program is a specification of behavior. A
>>> type model is a specification of constraints on that behavior. Any
>>> type-model sophisticated enough to fully constrain the behavior of a
>>> program will be no different from a programming language, and hence
>>> just as susceptible to error. Your "sufficiently smart type system"
>>> will need unit tests to ensure the correctness of your type models.
>> By that logic, sufficiently thorough unit tests will need unit tests
>> of their own as well.  It never stops.
>>
>> Your argument seems to be that static types can't eliminate 100% of
>> program errors, and as long as they can eliminate only 99.999% then
>> they're useless.
> 
> How about something between 1% to 10% of program errors?
> 
Easier to agree :-)
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f934d3$9d7$1@online.de>
Rainer Joswig schrieb:
> In article <··············@ruckus.brouhaha.com>,
>> Your argument seems to be that static types can't eliminate 100% of
>> program errors, and as long as they can eliminate only 99.999% then
>> they're useless.
> 
> How about something between 1% to 10% of program errors?

That figure is grossly underrating the power of static typing. Even in 
Pascal (and similar primitives+arrays+records languages), the type system 
can eliminate more than 50% of errors; the trade-off is bad because 
writing down the types is such a nuisance, not because there the win is 
negligible.

With a Hindley-Milner type system, both domain of things that can be 
represented in the type system is larger (70%? 90%? dunno really), and 
writing down the types is far less work.

Regards,
Jo
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-41CB4A.03034905082007@news-europe.giganews.com>
In article <············@online.de>,
 Joachim Durchholz <··@durchholz.org> wrote:

> Rainer Joswig schrieb:
> > In article <··············@ruckus.brouhaha.com>,
> >> Your argument seems to be that static types can't eliminate 100% of
> >> program errors, and as long as they can eliminate only 99.999% then
> >> they're useless.
> > 
> > How about something between 1% to 10% of program errors?
> 
> That figure is grossly underrating the power of static typing. Even in 
> Pascal (and similar primites+arrays+records languages), the type system 
> can eliminate more than 50% of errors; the trade-off is bad because 
> writing down the types is such a nuisance, not because there the win is 
> negligible.
> 
> With a Hindley-Milner type system, both domain of things that can be 
> represented in the type system is larger (70%? 90%? dunno really), and 
> writing down the types is far less work.
> 
> Regards,
> Jo

I think a number like 70% is an illusion.

90% of the errors are semantic errors, logic errors,
not completely implementing the
spec errors, approximations, accumulating float errors,
holes, partial implementations, inconsistencies,
not matching non-functional requirements, errors
in the compiler, errors in interfacing surrounding software,
errors dealing with complex runtime situations
(network connection, data transfer failures, data
corruption, ...) and so on.

No type system will catch a missing factor in a formula.
No type system will catch a missing clause to deal with
some input data.
No type system will catch a logic error (is it 'and' or 'or').
No type system will make your code meet a certain real-time
requirement.
No type system will make your UI
layout correct, so all elements are visible.
...

Plus the type system adds a certain amount of errors, that were
not errors without it.

I'm NOT saying that static type checking isn't useful, but
getting rid of 70% of programmer errors is totally
unrealistic.
From: Chris F Clark
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <sddabt6u5fl.fsf@shell01.TheWorld.com>
Rainer Joswig <······@lispmachine.de> writes:

> No type system will catch a missing clause to deal with
> some input data.

Actually, that's exactly what type systems are for.  One designs the
type system to catch the types of input data that will be supplied,
and the type system will make certain that all of your pattern
matching statements have code that addresses each of those
circumstances.  One can then quickly inspect each of those statements
to validate that they do the correct thing (and use that knowledge to
construct tests that give you good coverage).  

But one of the key things that the type system has bought you is the
exhaustivity check, that you have written code to cover all the cases.

It's also simpler to inspect that the type system is complete, because
the type system is simpler (and more abstract) than the code which
uses the types to make decisions.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-0E761E.05090205082007@news-europe.giganews.com>
In article <···············@shell01.TheWorld.com>,
 Chris F Clark <···@shell01.TheWorld.com> wrote:

> Rainer Joswig <······@lispmachine.de> writes:
> 
> > No type system will catch a missing clause to deal with
> > some input data.
> 
> Actually, that's exactly what type systems are for.  One designs the
> type system to catch the types of input data that will be supplied,
> and the type system will make certain that all of your pattern
> matching statements have code that addresses each of those
> circumstances.  One can then quickly inspect each of those statements
> to validate that they do the correct thing (and use that knowledge to
> construct tests that give you good coverage).  
> 
> But one of the key things that the type system has bought you is the
> exhaustivity check, that you have written code to cover all the cases.
> 
> It's also simpler to inspect that the type system is complete, because
> the type system is simpler (and more abstract) than the code which
> uses the types to make decisions.

Your program is correct as of 10:30am. 10:35am there is a new
part number range given to a new machine model. What now?
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <oitzre61sm.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <···············@shell01.TheWorld.com>,
>  Chris F Clark <···@shell01.TheWorld.com> wrote:
>
>> Rainer Joswig <······@lispmachine.de> writes:
>> 
>> > No type system will catch a missing clause to deal with
>> > some input data.
>> 
>> Actually, that's exactly what type systems are for.  One designs the
>> type system to catch the types of input data that will be supplied,
>> and the type system will make certain that all of your pattern
>> matching statements have code that addresses each of those
>> circumstances.  One can then quickly inspect each of those statements
>> to validate that they do the correct thing (and use that knowledge to
>> construct tests that give you good coverage).  
>> 
>> But one of the key things that the type system has bought you is the
>> exhaustivity check, that you have written code to cover all the cases.
>> 
>> It's also simpler to inspect that the type system is complete, because
>> the type system is simpler (and more abstract) than the code which
>> uses the types to make decisions.
>
> Your program is correct as of 10:30am. 10:35am there is a new
> part number range given to a new machine model. What now?


Do you really want to say that this a problem in a statically typed
language and none in Lisp? Because you don't check input in Lisp? Care
to elaborate? (You seem still to think that all input checks are
somehow magically encoded in the type system, don't you?)

Regards -- Markus
From: Chris F Clark
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <sddfy2yuhpc.fsf@shell01.TheWorld.com>
Rainer Joswig <······@lisp.de> writes:

> In article <···············@shell01.TheWorld.com>,
>  Chris F Clark <···@shell01.TheWorld.com> wrote:
>
>> Rainer Joswig <······@lispmachine.de> writes:
>> 
>> > No type system will catch a missing clause to deal with
>> > some input data.
>> 
>> Actually, that's exactly what type systems are for.  One designs the
>> type system to catch the types of input data that will be supplied,
>> and the type system will make certain that all of your pattern
>> matching statements have code that addresses each of those
>> circumstances.  One can then quickly inspect each of those statements
>> to validate that they do the correct thing (and use that knowledge to
>> construct tests that give you good coverage).  
>> 
>> But one of the key things that the type system has bought you is the
>> exhaustivity check, that you have written code to cover all the cases.
>> 
>> It's also simpler to inspect that the type system is complete, because
>> the type system is simpler (and more abstract) than the code which
>> uses the types to make decisions.
>
> Your program is correct as of 10:30am. 10:35am there is a new
> part number range given to a new machine model. What now?

At 10:35, you add the new part numbers to the type system.  If you are
interested in a quick-and-dirty fix, you patch all the places the
system indicates type errors with code, that says "not supported yet."
In fact, if it is something that happens often, you have a tool that
adds a suitable default to the patterns that does that for you which
you use for prototyping--running with this is no different than
dynamic typing, you simply get run-time errors.

However, eventually, you want a production quality version and you
don't want that "not supported yet" default behavior in it.  When, you
want that version, you don't have such a default, and you don't allow
"not supported yet." messages, and you go through all the type errors
and determine what the correct behavior is for the code.

The key difference here is that you have a tool (the static typing
system) that ensures that you haven't forgotten something in your
production version.  If you never forget something, maybe you don't
need it.  But, I've never had something significantly complex where
there wasn't something that I didn't forget handling some detail in.
This methodology saved my honor numerous times.  

Note, if you are working on very prototype code, you can leave those
"not supported yet" messages in for long periods of time.  However,
the type system will ensure that you have documented (with the "not
supported yet" message) all the places the code needs to be fixed and
you can't forget the fact that you haven't resolved that problem yet
and mistakenly think you are done, problem solved, when it really
isn't.

Note this is exactly the methodology we followed when developing the
750K SLOC tool that only had 3 errors in the production version over 5
years.  As I said before, it wasn't the only factor in the low error
rate, but it was a significant one.

If you read carefully, this has been my main issue through-out this
thread, people are inherently careless and forgetful and need a tool
that compensates for that behavior (and for this case static typing
works for that tool).  It is not a panacea, but it does substantially
make up for one of the flaws in human nature.

If I come to a point in the code where I encounter a "not supported
yet" message, then I know I can fix the code.  If I come to the same
piece of code and there simply isn't anything there, I don't know if
that omission is intentional and some other code depends on this code
NOT doing something here or accidental because the original coder
forgot or just never considered the case I'm faced with.  As a result,
I often can't fix the code.

Systems where one isn't forced to consider what happens at each point,
don't force one to document the cases one specifically hasn't
considered, because the system will automagically do something and
that won't force the coder to think.  Which means I as a downstream
maintainer/user don't know whether the coder thought or not.  Thus,
the time they saved, is inflicted as a cost on me multi-fold, which is
part of the reason maintenance is expensive.

That 5 year old tool, I was mentioning before is now maintained by one
person part-time.  Not that he understands it completely--none of us
who worked on it ever understood it all.  However, we left careful
breadcrumbs about what we did and did not understand.  That means one
can generally fix things very locally, because one knows exactly what
each part of the code expects to do (and not to do) and that really
helps when one is trying to determine if it is the code one is looking
at which is broken or is the error elsewhere.

I'm not a careful person.  I don't expect you to be.  I don't think
many creative people are.  However, if there is something simple and
relatively painless to do to make up for that carelessness, I will get
quite upset when you refuse to do it.  Static typing is like that.

"Picking up socks" is the canonical example.  One needs to
occasionally pick up one's socks not because we want a neat room, but
because eventually, having all our socks neat makes finding a clean
pair for going out on a date easier.  If you were my roommate and I
couldn't find a pair of socks for going out on a date, because your
socks were all over the apartment, I would be similarly distraught.

And, this can be taken just a little farther.  Perhaps, you are a
careful person and your code really is free of bugs that static
typing would have caught.  And, thus, perhaps, I shouldn't be shouting
at you.  However, you are surrounded by people who aren't careful and
who haven't produced bug-free code, and which static typing would have
made a little better, and who argue that if Rainer can code that way
(and he is great) I want to code that way too (and be great), but they
really aren't capable of it.  So, by allowing you to code without
checks and not complaining about it, I am implicitly inflicting on
myself more coding without checks by people who really shouldn't be.
Therein lies the issue.  

It's just a like a speed limit sign. Sure we might all go safely a
little bit faster (and some of you (not me) might be safe driving
considerably faster).  However, statistically speed limits save lives,
and some people who shouldn't go fast (teens after drinking at prom
night are an oft cited example) really need them, and so we all need
to obey them.  It's not that I want to get to places more slowly.
However, I don't want idiots who have no concern for my welfare
preventing me from getting there at all.
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87643t39yw.fsf@geddis.org>
Chris F Clark <···@shell01.TheWorld.com> wrote on Sun, 05 Aug 2007:
> I'm not a careful person.  I don't expect you to be.  I don't think
> many creative people are.  However, if there is something simple and
> relatively painless to do to make up for that carelessness, I will get
> quite upset when you refuse to do it.  Static typing is like that.

Rather than using guilt, or a moral argument, can't you just discuss the
actual benefits and drawbacks?  What benefits, on average, does static typing
provide?  What drawbacks does it inflict?  In which situation(s) do the
benefits outweigh the drawbacks?

Asserting that it is "obviously" the right approach for every programming
problem seems foolish.

> "Picking up socks" is the canonical example.  One needs to occassionally
> pick up ones socks not because we want a neat room, but because eventually,
> having all our socks neat makes finding a clean pair for going out on a
> date easier.  If you were my roommate and I couldn't find a pair of socks
> for going out on a date, because your socks were all over the apartment, I
> would be similarly distraught.

So your solution is to make it illegal to leave your house unless every
single sock has been picked up?  Otherwise you go to jail?  As though, there
could never possibly be anything more important in your life than having no
socks left on the ground at every moment.  Say, if there's a fire in your
house, or a home intruder, or perhaps your mom just arrived early at the
airport and needs to be picked up right away.  Nope, be sure you put away all
your socks _right_now_ before you leave the house; it must always be the top
priority of your life.

It's the REQUIREMENT that EVERY program MUST be provably free (by a limited
theorem prover) of compile-time type errors before ever executing the program
that annoys the dynamic typing fans.

> And, this can be taken just a little farther.  Perhaps, you are a careful
> perswon and your code really is free of bugs that static typing would have
> caught.  And, thus, perhaps, I shouldn't be shouting at you.  However, you
> are surrounded by people who aren't careful and who haven't produced
> bug-free code, and which static typing would have made a little better, and
> who argue that if Ranier can code that way (and he is great) I want to code
> that way too (and be great), but they really aren't capable of it.  So, by
> allowing you to code without checks and not complaining about it, I am
> implictly inflicting on myself more coding without checks by people who
> really shouldn't be.  Therein lies the issue.

Arguments like this led to Java.  That the average programmer is an idiot,
so programming languages need to be fascist and put requirements on you
"for your own good", whether you want them or not.

Some other, highly experienced, programmers, are instead searching for a tool
(a programming language) that allows them to most efficiently express their
computational thoughts.  Their concern is how productive the tool makes them.
They have zero interest in the tool imposing arbitrary requirements "for
their own good".

You should think carefully about whether that really is the intended benefit
of static type checks.  Most fans of such languages generally don't suggest
that the benefit is to rein in incompetent programmers.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
Q: How many surrealists does it take to screw in a lightbulb?
A: Two.  One to hold the giraffe and the other to fill the
   bathtub with brightly colored machine tools.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2ytzrd2gsm.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:

> Chris F Clark <···@shell01.TheWorld.com> wrote on Sun, 05 Aug 2007:
>> I'm not a careful person.  I don't expect you to be.  I don't think
>> many creative people are.  However, if there is something simple and
>> relatively painless to do to make up for that carelessness, I will get
>> quite upset when you refuse to do it.  Static typing is like that.
>
> Rather than using guilt, or a moral argument, can't you just discuss the


Not that you're quite innocent of that: I seem to remember a post
within the last two or three day where you accuse (I think it was)
Paul Rubin of scare tactics.

Regards -- Markus
From: Chris F Clark
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <sdd8x8n41tf.fsf@shell01.TheWorld.com>
Don Geddis <···@geddis.org> writes:

> Chris F Clark <···@shell01.TheWorld.com> wrote on Sun, 05 Aug 2007:
>> I'm not a careful person.  I don't expect you to be.  I don't think
>> many creative people are.  However, if there is something simple and
>> relatively painless to do to make up for that carelessness, I will get
>> quite upset when you refuse to do it.  Static typing is like that.
>
> Rather than using guilt, or a moral argument, can't you just discuss the
> actual benefits and drawbacks?  What benefits, on average, does static typing
> provide?  What drawbacks does it inflict?  In which situation(s) do the
> benefits outweigh the drawbacks?
> 
> Asserting that it is "obviously" the right approach for every programming
> problem seems foolish.

I'm not asserting that it is right for "every" programming problem,
just Rainer's case where we had fixed categories of finite things,
categories that we might want to expand later on.  That's the case I
am trying to deal with (and I think it is an important case, one that
people don't think enough about because it seems "so simple").

I have the real example the 750 K SLOC in the tool I recently worked
on.  That was the product of 4 engineers (+ 1 QA person) over 5 years
time.  If you start doing the math, that's about 100 debugged lines of
code per day.  I don't normally write that much debugged code per day,
more like 20.  Therefore, I was producing about 5 times as much
working code as I normally do (and I know how much of that code was
mine, so I know that it wasn't someone else on the project
significantly skewing the results).

The real benefits of strong static typing were in having these very
simple to reason about objects that we could reliably expect them to
be what they said they were, and knowing that our "case analyses" were
complete both before and after changes.  People may dismiss this as a
significant benefit, but I have experienced it and was more productive
as a result. Moreover, it is actually liberating to be able to dash
something off, and then know that the tool will catch you if you
omitted something.

A similar argument applies to logic minimization.  I can reliably
minimize small logic formulas by hand.  At the 6 to 8 variable stage,
I generally need to use a Karnaugh map.  However, with larger
equations, I simply use the Quine-McClusky algorithm (or a tool like
Espresso).  I would consider it unprofessional not to.  If I caught an
engineer working for me doing hand minimization of a 12 variable
expression, that engineer would have some serious explaining to do.  

I expect the same level of professionalism from people doing software.
If you are hand-crafting something that can be automated and made
rigorous, you need a good explanation if the problem is non-trivial.

Sure, it takes some time to get in the habit of defining types to
solve problems.  It took a while to learn multiplication too.
However, once the skill becomes second nature you can define the type
quickly enough that it doesn't matter. Just like it is quicker to
multiply rather than add (or to apply the QM algorithm in a program
rather than the algebra of Boolean logic by hand).

Finally, I think this is one case, which dynamic types make easy, but
easy in a false way.  A dynamic type frees one from having to consider
all the alternatives up front. However, where it falls down in not
providing a fail-safe way to check that you have considered all the
alternatives "in the end".  Now, one can argue when the end is, but
generally there is some point where we want to KNOW that the code is
correct.

My argument is that you can get your freedom up front in this case
with very little cost, often an emacs macro that takes only a key or
two to type to insert the stub code at the places where the type
system says you have an unconsidered alternative.  Then, you have all
the leisure of progressing knowing that you can find such stubs easily
and remove them when you understand what the code should really do at
those places.  Moreover, you KNOW that you haven't forgotten to put
such a stub in one of the places, because the type system prevents you
from omitting the stubs. Thus, you know when you have removed the
stubs, that you have (at least rudimentarily) considered what the code
should do for each of those places in each of those cases.  Moreover,
by diff'ing versus the stubbed code, you can even see what your
implementation decisions were and easily review the changes you have
made.  Again, all in the confidence that you didn't fall prey to the
normal human fallacy of missing 1 case out of 47, because the type
system assured that you listed all 47 things in each relevant place.

It isn't a panacea, but strong static typing does solve the "long list
of finite alternatives" problem and it solves it well.  I don't know
of anything else that covers up for that human failing (of omitting a
case) for that particular problem (when there are finite cases) as
well.  Moreover, I believe that humans naturally break problems down
into finite numbers of cases, which means this problem occurs much
more often than people suspect. However, if you can show me another
way of achieving the same result that is not subject to human failure,
I will grant that comparable status (depending on other factors and I
may even like the other method better). In fact, for some cases, I do
have better solutions, for example listing all 47 cases together in
one place works better when you can do that.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bjj7jpnhqob7@corp.supernews.com>
Don Geddis wrote:
> What benefits, on average, does static typing provide?

In decreasing order of importance:

. Better performance
. The ability to prove that a given piece of code cannot cause any run-time
errors
. Machine verified interface definitions
. Automates the inference of entire class hierarchies

> What drawbacks does it inflict?

In decreasing order of importance:

. More to learn
. More verbose when dynamic typing is needed
. Some formulations are not handled by the type system (e.g. polymorphically
recursive functions in OCaml) and must be translated into an "edible" form
by hand.

When you need dynamic types you have to write more code in a statically
typed language, e.g. remote procedure calls:

  server.getTemp('90210')                 Python
  server "getTemp" (`String "90210")      OCaml

In many other statically typed FPLs (Haskell, SML, F#), there are no
inferred open sum types equivalent to OCaml's polymorphic variants (the
`String in the above) so you must define your closed sum type for every
separate case and write it out by hand:

  type t =
    | Int of int
    | String of string
    | List of t list

  server "getTemp" (String "90210")

This can be a serious impediment when your type definition is significantly
verbose. Look at the Minim interpreter from this thread, for example. If
you choose to define the type of an expression in OCaml, as you must in
Haskell, it takes up a lot of space!

type 'var value =
  | Int of int
  | Var of 'var

type 'var test =
  | Less of 'var value * 'var value
  | Equal of 'var value * 'var value
  | Greater of 'var value * 'var value
  | And of 'var test * 'var test
  | Or of 'var test * 'var test
  | Not of 'var test

type ('var, 'tag) statement =
  | Assign of 'var * 'var value
  | Incr of 'var
  | Decr of 'var
  | If of 'var test * ('var, 'tag) statement * ('var, 'tag) statement
  | Goto of 'tag
  | Tag of 'tag
  | PrintString of string
  | Print of 'var
  | Input of 'var

type program = (string, string) statement list

There are two other important points:

. Exploratory programming
. Symbols

OCaml is the only modern statically-typed FPL to address both of these
issues and it does so with a single beautiful language feature: polymorphic
variants.

These are inferred sum types that may be open or closed. As they are
inferred, there are no type definitions to rewrite when you evolve your
code and because the sum types can be left open you can extend them
arbitrarily.

> In which situation(s) do the benefits outweigh the drawbacks?

Until 15 years ago I used dynamic or untyped languages exclusively. I was
intimately familiar with long-running programs dying from trivial errors
near completion and losing or corrupting my work, and of programming in the
debugger as a desperate attempt to prevent that from being too
catastrophic.

Then I learned C and that took up half of my coding. Although C had a static
type system and the excellent Norcroft compiler caught many errors, it was
still substantially more tedious to write code than the dynamic languages I
knew, so it did not displace the dynamic languages.

Seven years ago I started my PhD and wrote everything in a mix of C++ and
Mathematica. At the time, I loved the way C++ was fast and provided both
generic containers and static checking and I loved the way Mathematica's
dynamic approach made it easy to evolve solutions.

Among my friends, I was one of the last people to learn OCaml. I started
learning OCaml four years ago. This language really brought together all of
the features that I loved from many different languages and displaced all
of the others. So my use of dynamic typing has dropped to almost zero.

The only place I use dynamic typing now is term rewriting in Mathematica.
Even there, I move all non-trivial coding into a statically typed language
like OCaml as soon as possible because development is so much faster.

My position is strongly biased, of course, but I am only aware of large
players choosing OCaml as a foundation for their work and not Lisp. Intel
use OCaml to verify their CPU designs. Microsoft use OCaml to verify their
drivers and are borrowing heavily from OCaml in their F# language for .NET
which is already widely used inside Microsoft for everything from XBox to
Live AdCenter. And so on...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-E4FD5D.16172408082007@news-europe.giganews.com>
In article <··············@corp.supernews.com>,
 Jon Harrop <···@ffconsultancy.com> wrote:

> Don Geddis wrote:
> > What benefits, on average, does static typing provide?
> 
> In decreasing order of importance:
> 
> . Better performance
> . The ability to prove that a given piece of code cannot cause any run-time
> errors

It has been explained to you that the above is wrong. Why do you
ignore it?

Simple example:

write a function that allocates memory. The type system
will happily allow you allocate as much memory
as you want until you get an out of memory error.

Sure, you can restrict the allocation of memory, but the
type system will not do anything automatically for you.
If you write that code, it will not complain. 

So, 'ability to prove that a given piece of code cannot cause
any run-time errors' is just bullshit, like so much from you. 

...

> My position is strongly biased, of course, but I am only aware of large
> players choosing OCaml as a foundation for their work and not Lisp. Intel
> use OCaml to verify their CPU designs. Microsoft use OCaml to verify their
> drivers and are borrowing heavily from OCaml in their F# language for .NET
> which is already widely used inside Microsoft for everything from XBox to
> Live AdCenter. And so on...

You live in a dream world. Wake up.

-- 
http://lispm.dyndns.org
From: David Formosa (aka ? the Platypus)
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <slrnfbkde7.219.dformosa@localhost.localdomain>
["Followup-To:" header set to comp.lang.functional.]
On Wed, 08 Aug 2007 14:53:40 +0100, Jon Harrop <···@ffconsultancy.com>
wrote:
> Don Geddis wrote:
>> What benefits, on average, does static typing provide?
>
> In decreasing order of importance:
>
> . Better performance
> . The ability to prove that a given piece of code cannot cause any run-time
> . errors

PLease show me the static type checking system that given a piece of
code it can prove that the code is free of run-time errors.  I can't
see how you could pull this trick off without solving the halting
problem.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-5CEC2D.21105805082007@news-europe.giganews.com>
In article <···············@shell01.TheWorld.com>,
 Chris F Clark <···@shell01.TheWorld.com> wrote:

> Rainer Joswig <······@lisp.de> writes:
> 
> > In article <···············@shell01.TheWorld.com>,
> >  Chris F Clark <···@shell01.TheWorld.com> wrote:
> >
> >> Rainer Joswig <······@lispmachine.de> writes:
> >> 
> >> > No type system will catch a missing clause to deal with
> >> > some input data.
> >> 
> >> Actually, that's exactly what type systems are for.  One designs the
> >> type system to catch the types of input data that will be supplied,
> >> and the type system will make certain that all of your pattern
> >> matching statements have code that addresses each of those
> >> circumstances.  One can then quickly inspect each of those statements
> >> to validate that they do the correct thing (and use that knowledge to
> >> construct tests that give you good coverage).  
> >> 
> >> But one of the key things that the type system has bought you is the
> >> exhaustivity check, that you have written code to cover all the cases.
> >> 
> >> It's also simpler to inspect that the type system is complete, because
> >> the type system is simpler (and more abstract) than the code which
> >> uses the types to make decisions.
> >
> > Your program is correct as of 10:30am. 10:35am there is a new
> > part number range given to a new machine model. What now?
> 
> At 10:35, you add the new part numbers to the type system.  If you are
> interested in a quick-and-dirty fix, you patch all the places the
> system indicates type errors with code, that says "not supported yet."
> In fact, if it is something that happens often, you have a tool that
> adds a suitable default to the patterns that does that for you which
> you use for prototyping--running with this is no different than
> dynamic typing, you simply get run-time errors.

The customer does not want a new software every
time he has a new machine model. Would be good for you,
though. You could bill him every release.

-- 
http://lispm.dyndns.org
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <dsabt4lobo.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <···············@shell01.TheWorld.com>,
>  Chris F Clark <···@shell01.TheWorld.com> wrote:
>
>> Rainer Joswig <······@lisp.de> writes:
>> 
>> > In article <···············@shell01.TheWorld.com>,
>> >  Chris F Clark <···@shell01.TheWorld.com> wrote:
>> >
>> >> Rainer Joswig <······@lispmachine.de> writes:
>> >> 
>> >> > No type system will catch a missing clause to deal with
>> >> > some input data.
>> >> 
>> >> Actually, that's exactly what type systems are for.  One designs the
>> >> type system to catch the types of input data that will be supplied,
>> >> and the type system will make certain that all of your pattern
>> >> matching statements have code that addresses each of those
>> >> circumstances.  One can then quickly inspect each of those statements
>> >> to validate that they do the correct thing (and use that knowledge to
>> >> construct tests that give you good coverage).  
>> >> 
>> >> But one of the key things that the type system has bought you is the
>> >> exhaustivity check, that you have written code to cover all the cases.
>> >> 
>> >> It's also simpler to inspect that the type system is complete, because
>> >> the type system is simpler (and more abstract) than the code which
>> >> uses the types to make decisions.
>> >
>> > Your program is correct as of 10:30am. 10:35am there is a new
>> > part number range given to a new machine model. What now?
>> 
>> At 10:35, you add the new part numbers to the type system.  If you are
>> interested in a quick-and-dirty fix, you patch all the places the
>> system indicates type errors with code, that says "not supported yet."
>> In fact, if it is something that happens often, you have a tool that
>> adds a suitable default to the patterns that does that for you which
>> you use for prototyping--running with this is no different than
>> dynamic typing, you simply get run-time errors.
>
> The customer does not want a new software every
> time he has a new machine model. Would be good for you,
> though. You could bill him every release.

Guess what: Neither in statically typed language, nor in a dynamically
typed language the permissibility of a part number _range_ would be
built into the program and certainly not be type checked
statically. Why should it? Again you completely misunderstand what
static typing is good for (not to replace all conditionals, certainly
...) and sadly Chris has been falling for it. Interestingly you
haven't answered my question how you would do it in Lisp.

Regards -- Markus
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <y3zm1661yq.fsf@hod.lan.m-e-leypold.de>
Chris F Clark wrote:

> Rainer Joswig <······@lispmachine.de> writes:
>
>> No type system will catch a missing clause to deal with
>> some input data.
>
> Actually, that's exactly what type systems are for.  One designs the

I recall that statements to that effect have been repeatedly ignored
before.

Regards -- Markus
From: Stephen J. Bevan
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87ps22it2b.fsf@dnsalias.com>
Rainer Joswig <······@lispmachine.de> writes:
> In article <············@online.de>, Joachim Durchholz <··@durchholz.org> wrote:
>> With a Hindley-Milner type system, both domain of things that can be 
>> represented in the type system is larger (70%? 90%? dunno really), and 
>> writing down the types is far less work.
>
> I think a number like 70% is an illusion.

Perhaps depends on the programmer?  Quite a few years ago I counted
*every* error I made while writing a non-trivial program in Scheme.
Approximately 30% of the errors would have been caught by the type
checker had I written in SML or Haskell.  I don't claim that number is
typical only that it was (carefully) measured.
From: Nicolas Neuss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87y7gp3eez.fsf@ma-patru.mathematik.uni-karlsruhe.de>
·······@dino.dnsalias.com (Stephen J. Bevan) writes:

> Perhaps depends on the programmer?  Quite a few years ago I counted
> *every* error I made while writing a non-trivial program in Scheme.
> Approximately 30% of the errors would have been caught by the type
> checker had I written in SML or Haskell.  I don't claim that number is
> typical only that it was (carefully) measured.

It surely depends on programmers and tools.  I once was very impressed by a
colleague who wrote a non-trivial function in C that compiled without any
problem.  And my experience is also that about 70% of my errors when
writing Common Lisp code are function signature and type mismatches which
are easily found by following the warnings during compilation (especially
using CMUCL, SCBL).  Maybe some of you are barking up the wrong tree?  CL
is not Scheme, some CL implementations can find a lot of errors at compile
time, and it is obvious that much more could be done on the implementation
side while still remaining faithful to the CL standard.

Nicolas
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <nsodhl2gcn.fsf@hod.lan.m-e-leypold.de>
Nicolas Neuss wrote:

> using CMUCL, SCBL).  Maybe some of you are barking up the wrong tree?  CL

Maybe some of us do. But maybe also some of those that have been
ranting now for some time how absolutely cumbersome type systems are
also do. I'd like you to note that (as a I see it) some of the less
realistic claims what type systems can do and are supposed to do came
from the _opponents_ of static typing. In this subthread, though,
we've been discussing how many error a static type system can catch: I
don't think a number > 50% is unrealistic. You should note though,
that it depends on when you start to count errors (every one the
programmer entered, including typos and incomplete expressions
vs. errors that are more in the domain of classical QA and/or perhaps
made it to the version control already) and that we don't claim that
those errors could not have been caught by other methods as well
(including unit tests, rereading your own code or a formal code
review).

Regards -- Markus
From: Andrew Reilly
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <pan.2007.08.06.09.05.46.111795@areilly.bpc-users.org>
On Mon, 06 Aug 2007 10:54:16 +0200, Markus E.L. 2 wrote:

> 
> Nicolas Neuss wrote:
> 
>> using CMUCL, SCBL).  Maybe some of you are barking up the wrong tree?  CL
> 
> Maybe some of us do. But maybe also some of those that have been
> ranting now for some time how absolutely cumbersome type systems are
> also do. I'd like you to note that (as a I see it) some of the less
> realistic claims what type systems can do and are supposed to do came
> from the _opponents_ of static typing. In this subthread, though,
> we've been discussing how many error a static type system can catch: I
> don't think a number > 50% is unrealistic. You should note though,
> that it depends on when you start to count errors (every one the
> programmer entered, including typos and incomplete expressions
> vs. errors that are more in the domain of classical QA and/or perhaps
> made it to the version control already) and that we don't cleim that
> those errors could not have been caught by other methods as well
> (including unit tests, rereading your own code or a formal code
> review).

Or just running the code on your representative data set.  That takes
about as much time as compiling the program on some systems, and catches
most of the same sorts of errors.  Once you're down on your typos and dumb
type errors (yes, sometimes I pass a hash table to a function expecting a
list or vector, but I'm still green at this game) what you've got left is
pretty much logic errors, and that's not going to go away with a type
system.

Cheers,

-- 
Andrew
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186397408.208827.296780@d55g2000hsg.googlegroups.com>
On 6 Aug., 11:05, Andrew Reilly <···············@areilly.bpc-
users.org> wrote:

> Once you're down on your typos and dumb
> type errors (yes, sometimes I pass a hash table to a function expecting a
> list or vector, but I'm still green at this game) what you've got left is
> pretty much logic errors, and that's not going to go away with a type
> system.

This is not so absolutely true as you might think.
Please look at http://perl.plover.com/yak/typing/ (and then at slides
27 to 31), where a well known perl guru tells us a story about his
experience with ML and how the type system hinted him at an infinite
loop error.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <sz3aywsrzx.fsf@hod.lan.m-e-leypold.de>
Ingo Menger wrote:

> On 6 Aug., 11:05, Andrew Reilly <···············@areilly.bpc-
> users.org> wrote:
>
>> Once you're down on your typos and dumb
>> type errors (yes, sometimes I pass a hash table to a function expecting a
>> list or vector, but I'm still green at this game) what you've got left is
>> pretty much logic errors, and that's not going to go away with a type
>> system.
>
> This is not so absolutely true as you might think.
> Please look at http://perl.plover.com/yak/typing/ (and then at slides
> 27 to 31), where a well known perl guru tells us a story about his
> experience with ML and how the type system hinted him at an infinite
> loop error.

Oh, yes: That is a common experience that recursions that are not
closed (I mean, don't have a case that doesn't recur) end in a
recursive type or something that cannot be typed. Actually I admit
that at the moment such a case is bothering me (I'm writing a program
that passes around a lot of continuations in various data types), but:
Yes I have a problem there, but it can be fixed and I'm quite sure it
was a good thing that type inference barfed at me there: I had a
conceptual problem which I didn't fully realize (actually I couldn't
completely answer the question "What are you passing around here?").

Regards -- Markus
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ra7io8ss6r.fsf@hod.lan.m-e-leypold.de>
Andrew Reilly wrote:

> On Mon, 06 Aug 2007 10:54:16 +0200, Markus E.L. 2 wrote:
>
>> 
>> Nicolas Neuss wrote:
>> 
>>> using CMUCL, SCBL).  Maybe some of you are barking up the wrong tree?  CL
>> 
>> Maybe some of us do. But maybe also some of those that have been
>> ranting now for some time how absolutely cumbersome type systems are
>> also do. I'd like you to note that (as a I see it) some of the less
>> realistic claims what type systems can do and are supposed to do came
>> from the _opponents_ of static typing. In this subthread, though,
>> we've been discussing how many error a static type system can catch: I
>> don't think a number > 50% is unrealistic. You should note though,
>> that it depends on when you start to count errors (every one the
>> programmer entered, including typos and incomplete expressions
>> vs. errors that are more in the domain of classical QA and/or perhaps
>> made it to the version control already) and that we don't cleim that
>> those errors could not have been caught by other methods as well
>> (including unit tests, rereading your own code or a formal code
>> review).
>
> Or just running the code on your representative data set.  That takes

Yes. That's testing.

> about as much time as compiling the program on some systems, and catches
> most of the same sorts of errors.  

No, not quite. The benefits of static typing and tests overlap but
are nonetheless not identical. Furthermore I dare to suggest that the
results of testing (especially failures) are easier to interpret with
programs that could, in principle, be statically typed.

> Once you're down on your typos and dumb
> type errors 

> (yes, sometimes I pass a hash table to a function expecting a
> list or vector, but I'm still green at this game) 

> what you've got left is
> pretty much logic errors, and that's not going to go away with a type
> system.

Yes, but type errors constitute actually the majority of problems. I
like to have them off the table and not clutter up my testing results
(that is: Testing is done and results are interpreted knowing that
typing is already OK).

Regards -- Markus
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <mb4pje7gl1.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <············@online.de>,
>  Joachim Durchholz <··@durchholz.org> wrote:
>
>> Rainer Joswig schrieb:
>> > In article <··············@ruckus.brouhaha.com>,
>> >> Your argument seems to be that static types can't eliminate 100% of
>> >> program errors, and as long as they can eliminate only 99.999% then
>> >> they're useless.
>> > 
>> > How about something between 1% to 10% of program errors?
>> 
>> That figure is grossly underrating the power of static typing. Even in 
>> Pascal (and similar primites+arrays+records languages), the type system 
>> can eliminate more than 50% of errors; the trade-off is bad because 
>> writing down the types is such a nuisance, not because there the win is 
>> negligible.
>> 
>> With a Hindley-Milner type system, both domain of things that can be 
>> represented in the type system is larger (70%? 90%? dunno really), and 
>> writing down the types is far less work.
>> 
>> Regards,
>> Jo
>
> I think a number like 70% is an illusion.
>
> 90% of the errors are semantic errors, logic errors,

90% of which errors? Of those that make it into the product? Or those
that occur during development?

- M
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f941cp$4mf$1@online.de>
Rainer Joswig schrieb:
>  Joachim Durchholz <··@durchholz.org> wrote:
> 
>> With a Hindley-Milner type system, both domain of things that can be 
>> represented in the type system is larger (70%? 90%? dunno really), [...]
> 
> I think a number like 70% is an illusion.
> 
> 90% of the errors are semantic errors, logic errors,
> not completely implementing the
> spec errors, approximations, accumulating float errors,
> holes, partial implementations, inconsistencies,
> not matching non-functional requirements, errors
> in the compiler, errors in interfacing surrounding software,
> errors dealing with complex runtime situations
> (network connection, data transfer failures, data
> corruption, ...) and so on.

Actually, Hindley-Milner type checking can catch a surprisingly large 
fraction of semantic errors.

> No type system will catch a missing factor in a formula.
> No type system will catch a missing clause to deal with
> some input data.

Right.

> No type system will catch logic error (is it 'and' or 'or')?.

Not necessarily. If the 'and' is inside a pattern match, it will alert 
you to problems if you haven't covered all cases, or if you have 
overlapping cases.
If you program mostly with pattern matching, you'll end up with most 
conditionals in patterns and get >90% coverage for misconstructed 
boolean expressions.

> No type system will make your code meet a certain real-time
> requirement.

Right.

> No type system will make your UI
> layout correct, so all elements are visible.

Well, it could at least ensure partial correctness in the sense that all 
controls are visible that should be visible. This isn't usually done, of 
course.
It cannot control overlaps. That's an assertion that can be established 
in the standard library though.

> Plus the type system adds a certain amount of errors, that were
> not errors without it.

Not sure what you mean with that.

> I'm NOT saying that static type checking isn't useful, but
> getting rid of 70% of programmer errors is totally
> unrealistic.

I think it depends on the style of programming.

If you program in a language with run-time type checking all day, you 
develop strategies to avoid type errors. Since I don't program in these, 
I don't know exactly what kind of strategies this is, but I could 
imagine any combination of refraining from too much nesting or 
interlinking for data structures, avoiding too high orders, and making 
the code degrade gracefully if a wrong type comes along. (Not all of 
these techniques would be applied at all levels of expertise, I'd say - 
a HOF jock could write an order-10 function in Lisp without getting 
confused, but an intermediate programmer might be grateful if the 
compiler told it when he got the call wrong.)
If, on the other hand, you live with a statically-typed language, you 
concentrate more on those errors that are not caught by the compiler, so 
the type error rate goes up, hence the error detection rate improves.

IOW I'd say that high error detection rates are partly an artifact.
OTOH I'd say that everything that limits the set of errors that you have 
to concentrate on is a Good Thing. Static typing is one such technique; 
libraries with narrow and fool-proof interfaces are another; automatic 
array-bounds checking a third; etc. etc. etc.

Regards,
Jo
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <wzmyx6adwn.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <··············@ruckus.brouhaha.com>,
>  Paul Rubin <·············@NOSPAM.invalid> wrote:
>
>> Rayiner Hashem <·······@gmail.com> writes:
>> > There is no free lunch. A program is a specification of behavior. A
>> > type model is a specification of constraints on that behavior. Any
>> > type-model sophisticated enough to fully constrain the behavior of a
>> > program will be no different from a programming language, and hence
>> > just as susceptible to error. Your "sufficiently smart type system"
>> > will need unit tests to ensure the correctness of your type models.
>> 
>> By that logic, sufficiently thorough unit tests will need unit tests
>> of their own as well.  It never stops.
>> 
>> Your argument seems to be that static types can't eliminate 100% of
>> program errors, and as long as they can eliminate only 99.999% then
>> they're useless.
>
> How about something between 1% to 10% of program errors?

More I think, considering that a large number of errors in
almost-untyped-languages like C are actually

  - forgotten part of an expression
  - typos (Mistyping of symbols, forgotten parenthesis)

Of course a part of them is caught in a dynamic type system at compile
time too, but the difference between

  (car (cdr(

and

  (cdr (car(

certainly is not. And these are the most common errors (most don't make
it to the product, but it's better to catch them at the first build
attempt than at the unit test 3 hours later: Programmers can't switch
contexts arbitrarily).

Regards -- Markus
  

   
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f90cl0$f62$1@aioe.org>
Paul Rubin escreveu:
> Rayiner Hashem <·······@gmail.com> writes:
>> There is no free lunch. A program is a specification of behavior. A
>> type model is a specification of constraints on that behavior. Any
>> type-model sophisticated enough to fully constrain the behavior of a
>> program will be no different from a programming language, and hence
>> just as susceptible to error. Your "sufficiently smart type system"
>> will need unit tests to ensure the correctness of your type models.
> 
> By that logic, sufficiently thorough unit tests will need unit tests
> of their own as well.  It never stops.
> 
> Your argument seems to be that static types can't eliminate 100% of
> program errors, and as long as they can eliminate only 99.999% then
> they're useless.

I read it differently: "there is no evidence it can eliminate anything 
that near the purported 99.999%"
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <00ejiiadgg.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Paul Rubin escreveu:
>> Rayiner Hashem <·······@gmail.com> writes:
>>> There is no free lunch. A program is a specification of behavior. A
>>> type model is a specification of constraints on that behavior. Any
>>> type-model sophisticated enough to fully constrain the behavior of a
>>> program will be no different from a programming language, and hence
>>> just as susceptible to error. Your "sufficiently smart type system"
>>> will need unit tests to ensure the correctness of your type models.
>> By that logic, sufficiently thorough unit tests will need unit tests
>> of their own as well.  It never stops.
>> Your argument seems to be that static types can't eliminate 100% of
>> program errors, and as long as they can eliminate only 99.999% then
>> they're useless.
>
> I read it differently: "there is no evidence it can eliminate anything
> that near the purported 99.999%"

So? If it eliminates only 70% and makes discovery of simple errors like
typos or badly formed expressions faster and easier, then we say
"sorry, that's not enough, we prefer to live w/o it, since we can't
get it all?".

Regards -- Markus
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9603g$r29$2@aioe.org>
Markus E.L. 2 escreveu:
> Cesar Rabak wrote:
> 
>> Paul Rubin escreveu:
>>> Rayiner Hashem <·······@gmail.com> writes:
>>>> There is no free lunch. A program is a specification of behavior. A
>>>> type model is a specification of constraints on that behavior. Any
>>>> type-model sophisticated enough to fully constrain the behavior of a
>>>> program will be no different from a programming language, and hence
>>>> just as susceptible to error. Your "sufficiently smart type system"
>>>> will need unit tests to ensure the correctness of your type models.
>>> By that logic, sufficiently thorough unit tests will need unit tests
>>> of their own as well.  It never stops.
>>> Your argument seems to be that static types can't eliminate 100% of
>>> program errors, and as long as they can eliminate only 99.999% then
>>> they're useless.
>> I read it differently: "there is no evidence it can eliminate anything
>> that near the purported 99.999%"
> 
> So? If it eliminates only 70% and make discovery of simple errors like
> typos or badly formed expressions faster and easier, then we say
> "sorry, that's not enough, we prefer to live w/o it, since we can't
> get it all?".

No Markus, we believe 70% is still being overrated :-)
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <dp7io92eni.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Markus E.L. 2 escreveu:
>> Cesar Rabak wrote:
>>
>>> Paul Rubin escreveu:
>>>> Rayiner Hashem <·······@gmail.com> writes:
>>>>> There is no free lunch. A program is a specification of behavior. A
>>>>> type model is a specification of constraints on that behavior. Any
>>>>> type-model sophisticated enough to fully constrain the behavior of a
>>>>> program will be no different from a programming language, and hence
>>>>> just as susceptible to error. Your "sufficiently smart type system"
>>>>> will need unit tests to ensure the correctness of your type models.
>>>> By that logic, sufficiently thorough unit tests will need unit tests
>>>> of their own as well.  It never stops.
>>>> Your argument seems to be that static types can't eliminate 100% of
>>>> program errors, and as long as they can eliminate only 99.999% then
>>>> they're useless.
>>> I read it differently: "there is no evidence it can eliminate anything
>>> that near the purported 99.999%"
>> So? If it eliminates only 70% and make discovery of simple errors
>> like
>> typos or badly formed expressions faster and easier, then we say
>> "sorry, that's not enough, we prefer to live w/o it, since we can't
>> get it all?".
>
> No Markus, we believe 70% still it's being overrated :-)

As I posted elsewhere: Depends on how you count and actually other
people (like Jo Durchholz who is not known for exaggerations) report
similar experiences. There was AFAIR even a post yesterday where
someone once logged the errors found and reported a similar order
of magnitude. So, believe what you want: I do think that everything
above 10% IS worth the trouble. The actual number is almost certainly
higher.

Regards -- Markus
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hh40rF3katv8U1@mid.individual.net>
Ingo Menger wrote:

>> I just want to inform you about the fact that there are people who care
>> about the programs that static type systems cannot check and who
>> understand what the underlying issues are.
> 
> I respect that, because I can imagine that such programs really exist.
> 
> But when I hear statements like "the type system disturbs me in my
> usual code-some-idea-and-test-it cycle", or the like then I have the
> impression that certain people can't see how development in a strongly
> typed language is like.

This cuts both ways.

> It's like those FORTRAN IV programmers that
> refuse(d*) to adapt to new languages because there was no FORMAT
> statement, no computed GOTO and no COMMON block with EQUIVALENCE
> clauses.

These are straw men.

If you can define a type system that lets the following program through, 
then I'm in. If you cannot make it acceptable, I am not interested. [1]

(defclass person ()
   ((name :initarg :name :reader person-name)))

(defvar *pascal* (make-instance 'person :name "Pascal"))

(defun test ()
   (eval (read))
   (print (person-name *pascal*))
   (print (person-address *pascal*)))



Pascal

[1] Yes, I know this is an extreme and possibly even pathological case. 
But it is the most compact example I can think of that illustrates what 
is necessary to make upgradable software systems work.

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Chris F Clark
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <sdd8x8sv15x.fsf@shell01.TheWorld.com>
Pascal Costanza <··@p-cos.net> writes:

> If you can define a type system that lets the following program
> through, then I'm in. If you cannot make it acceptable, I am not
> interested. [1]
>
> (defclass person ()
>   ((name :initarg :name :reader person-name)))
>
> (defvar *pascal* (make-instance 'person :name "Pascal"))
>
> (defun test ()
>   (eval (read))
>   (print (person-name *pascal*))
>   (print (person-address *pascal*)))
>
>
>
> Pascal
>
> [1] Yes, I know this is an extreme and possibly even pathological
> case. But it is the most compact example I can think of that
> illustrates what is necessary to make upgradable software systems work.

I can understand your desire to make a working upgradable software
system.  I use "one" all the time, it's called emacs.  However, it is
also what makes me despise dynamic type systems.  There is code in
emacs which simply doesn't work correctly because there are functions
which don't interact properly because they are incorrectly specified
in a way that they wouldn't be incorrectly specified if the authors of
such functions had to use correct types and not simply untyped lists
of things where certain elements were assumed to hold certain values
(and where the code is inconsistent about which locations are used to
hold those values).  The lack of strong types makes such programs
brittle and non-robust, right at the point where dynamic typing should
make them robust.

If humans were flawless and the code we wrote was provably complete
and consistent, we wouldn't need strong types.  However, we aren't,
which makes us desire non-strong types at exactly the points where we
need them most.

If you personally want to come over and fix every error in emacs when
a null is caught in some place where it shouldn't be possible, then I
will never complain about your use of lisp.  Until that time, don't be
surprised when advocates of a more controlled development methodology
complain when they find code written by your cohorts to be lacking
when that methodology (in this case strong typing) could have
prevented bugs in that code.  The fact is that for every genius who is
empowered by an advanced feature there are hundreds of incompetents
who abuse the feature to write trash which is foisted upon the rest of
us.

Note, I think the idea that lisp macros can be used to prevent bugs
actually quite appealing, and I have nothing against s-expression
syntax and personally find the "static functional programming" syntax
of the ML/Haskell languages too terse and cryptic at important points.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186174013.784239.241970@q75g2000hsh.googlegroups.com>
> If you personally want to come over and fix every error in emacs when
> a null is caught in some place where it shouldn't be possible, then I
> will never complain about your use of lisp.  Until that time, don't be
> surprised when advocates of a more controlled development methodology
> complain when they find code written by your cohorts to be lacking
> when that methodology (in this case strong typing) could have
> prevented bugs in that code.

Your argument is completely illogical.

1) You conflate the faults of Emacs Lisp with the faults of dynamic
typing in general. Static typing wouldn't have fixed the problems in
Emacs, because then people just would've used statically-typed lists
with implicit structure. You see, the problem in Emacs Lisp isn't that
it's dynamically typed, but that it has no way to create structured,
user-defined types, dynamic or otherwise!

2) You assume static typing would've fixed the problems in Emacs, but
you completely ignore the fact that proper testing would've done the
same thing.

3) You extrapolate from the single data-point of Emacs to make all
sorts of wild claims. Remember, Emacs Lisp is an extension language
for a text editor. Moreover, Emacs extensions are usually scripts,
programmed with a small time investment, often by users and not
experienced Emacs developers. You can't extrapolate from the quality
of Emacs to the quality of programs written in dynamically-typed
languages in general.

Now, maybe if you could show me the statically-typed equivalent of
Emacs, written in a language equally primitive (but with a static type
system), and then show that a script thrown together in an evening by
Joe user for SuperStaticEmacs is better than the same thing in regular
Emacs, maybe you'd have a point!
From: Kaz Kylheku
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186175674.457116.200800@22g2000hsm.googlegroups.com>
On Aug 3, 1:00 pm, Chris F Clark <····@shell01.TheWorld.com> wrote:
> Pascal Costanza <····@p-cos.net> writes:
> > If you can define a type system that lets the following program
> > through, then I'm in. If you cannot make it acceptable, I am not
> > interested. [1]
>
> > (defclass person ()
> >   ((name :initarg :name :reader person-name)))
>
> > (defvar *pascal* (make-instance 'person :name "Pascal"))
>
> > (defun test ()
> >   (eval (read))
> >   (print (person-name *pascal*))
> >   (print (person-address *pascal*)))
>
> > Pascal
>
> > [1] Yes, I know this is an extreme and possibly even pathological
> > case. But it is the most compact example I can think of that
> > illustrates what is necessary to make upgradable software systems work.
>
> I can understand your desire to make a working upgradable software
> system.  I use "one" all the time, it's called emacs.  However, it is
> also what makes me despise dynamic type systems.  There is code in
> emacs which simply doesn't work correctly because there are functions
> which don't interact properly because they are incorrectly specified
> in a way that they wouldn't be incorrectly specified if the authors of
> such functions had to use correct types and not simply untyped lists
> of things where certain elements were assumed to hold certain values
> (and where the code is inconsistent about which locations are used to
> hold those values).  The lack of strong types make such programs
> brittle and non-robust, right at the point where dynamic typing should
> make them robust.
>
> If humans were flawless and the code we wrote was provably complete
> and consistent, we wouldn't need strong types.

Dynamic types can be strong (reliably checked, inviolable) or weak
(unchecked, or defeatable).

Static isn't the same thing as strong. Static typing can also be
strong (reliably checked, inviolable) or weak (unchecked, or
defeatable).

C's static typing is weak because it's defeatable (type-punning casts)
and in some cases unchecked (like a conversion of some pointer to void
*, and then back to a different pointer type, with no casts or
diagnostics).

Weak typing in a dynamically typed language could be from lack of checking.
E.g. someone's ``one-weekend'' interpreter that has no error checking,
so that applying a string-length function to an integer variable
simply blows up at run-time. Weakness could also exist in the form of
defeated checking: e.g. in an advanced dynamic language, writing a
declaration that some variable holds a particular type, and declaring
safety to be unimportant.

So there is the strong-weak axis and the static-dynamic axis. Pick
your coordinates.

Lastly, note that dynamic typing is not incompatible with static
checking.  Some type errors in dynamically typed programs can be
statically detected, and there are dynamically typed programs which
are not checkable: they are correct, yet this cannot be concluded
statically.

  However, we aren't,
> which makes us desire non-strong types at exactly the points where we
> need them most.
>
> If you personally want to come over and fix every error in emacs when
> a null is caught in some place where it shouldn't be possible, then I
> will never complain about your use of lisp.  Until that time, don't be
> surprised when advocates of a more controlled development methodology
> complain when they find code written by your cohorts to be lacking
> when that methodology (in this case strong typing) could have
> prevented bugs in that code.

You are discussing these alleged Emacs program errors without knowing
whether or not they belong into the category that could have been
caught by a static analysis.

That the tool to do so doesn't exist is an observation about the
maturity of the Emacs implementation of Lisp (which is based on an
outdated Lisp dialect anyway).

So in other words, the static typing methodology (in one of its most
advanced forms, that being type inferencing) could perhaps be applied
to these programs without modification.

The problem with static typing, if it is mandatory rather than merely
advisory, is that it sometimes prevents errors by preventing an entire
class of programs from being run. Proponents of dynamic languages find
that unacceptable, because they sometimes write useful programs within
that forbidden class.

Mandatory static typing concludes that a program which cannot be
proven in some bounded time to be free of type errors is in fact
erroneous, causing the program to be rejected. Or causing it to be
effectively rejected: some executable form is retained, but contains
no further safety net beyond the static check so that ``all bets are
off'' if it is run.

The best of all worlds is a combination of static and dynamic.
Thorough analysis is applied to a program. If it cannot conclude that
the program is correct, but finds no errors, a diagnostic to that
effect is emitted. The user can then choose to run the program
anyway.  If that is done, there is still the safety net of robust run-
time checking which catches any type-mismatch error as a proper
exception.

Only sections of code where safety is deliberately turned off for
performance are completely unchecked.

My last remark on this topic is that typing isn't just for error
checking. Error checking isn't even the interesting application of
types. The need to have run-time classification of values into type
categories is so pervasive, that it's frequently implemented even
where the language support is lacking. And when that happens, it's
usually implemented in an ad-hoc, slow, bug-ridden way, with poor,
inconsistent run-time error checking.
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hjatpF3jkjdmU1@mid.individual.net>
Chris F Clark wrote:
> Pascal Costanza <··@p-cos.net> writes:
> 
>> If you can define a type system that lets the following program
>> through, then I'm in. If you cannot make it acceptable, I am not
>> interested. [1]
>>
>> (defclass person ()
>>   ((name :initarg :name :reader person-name)))
>>
>> (defvar *pascal* (make-instance 'person :name "Pascal"))
>>
>> (defun test ()
>>   (eval (read))
>>   (print (person-name *pascal*))
>>   (print (person-address *pascal*)))
>>
>>
>>
>> Pascal
>>
>> [1] Yes, I know this is an extreme and possibly even pathological
>> case. But it is the most compact example I can think of that
>> illustrates what is necessary to make upgradable software systems work.
> 
> I can understand your desire to make a working upgradable software
> system.  I use "one" all the time, it's called emacs.  However, it is
> also what makes me despise dynamic type systems. 

I don't know the specifics of emacs's type system, so I cannot comment
on this.

> If you personally want to come over and fix every error in emacs when
> a null is caught in some place where it shouldn't be possible, then I
> will never complain about your use of lisp.  Until that time, don't be
> surprised when advocates of a more controlled development methodology
> complain when they find code written by your cohorts to be lacking
> when that methodology (in this case strong typing) could have
> prevented bugs in that code.  The fact is that for every genius who is
> empowered by an advanced feature there are hundreds on incompetents
> who abuse the feature to write trash which is foisted up the rest of
> us.

I wouldn't have anything against static type checking if there were a
static type system that would accept the kinds of programs I am
interested in. Of course, anything that prevents errors from occurring
in the first place is useful. The example above, though, shows that this
is probably not possible. As I said, there are always valid programs
that static type systems have to reject, and the above example _is_ a
valid program in Common Lisp that can be successfully executed.

If you can come up with a static type system that handles such cases, I
am in. But I have strong doubts that you will be able to deliver, so I
am not holding my breath. I don't want to bother anymore working around
limitations that static type systems impose on me.

Again, this is my personal subjective perspective. I don't have any
problems if you have a different perspective. It may be that upgradable
systems don't play an important role in the long run, so it could turn
out that I have just wasted my time. But what is important is that your
perspective is just as personal and subjective as any other in such
discussions, and it may turn out that it is the statically checked
systems that will not play an important role in the long run. [1]

We will only know in a few hundred years from now who was right. ;-)


Pascal


[1] In fact, I am strongly convinced of this.

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186232884.904925.192940@o61g2000hsh.googlegroups.com>
On 4 Aug., 14:48, Pascal Costanza <····@p-cos.net> wrote:

> If you can come up with a static type system that handles such cases, I
> am in.

You may relax, since no type system will ever "handle" eval the way
you want it.
From: Frank Buss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <x91i7x93rfoi$.273v751lzqz9.dlg@40tude.net>
Pascal Costanza wrote:

> (defclass person ()
>    ((name :initarg :name :reader person-name)))
> 
> (defvar *pascal* (make-instance 'person :name "Pascal"))
> 
> (defun test ()
>    (eval (read))
>    (print (person-name *pascal*))
>    (print (person-address *pascal*)))

This would even fail in Common Lisp, unless I missed person-address in the
CLHS :-)

-- 
Frank Buss, ··@frank-buss.de
http://www.frank-buss.de, http://www.it4-systems.de
From: Frank Buss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1uaim6zhsw868$.500bss6e6ac8.dlg@40tude.net>
Frank Buss wrote:

> Pascal Costanza wrote:
> 
>> (defclass person ()
>>    ((name :initarg :name :reader person-name)))
>> 
>> (defvar *pascal* (make-instance 'person :name "Pascal"))
>> 
>> (defun test ()
>>    (eval (read))
>>    (print (person-name *pascal*))
>>    (print (person-address *pascal*)))
> 
> This would even fail in Common Lisp, unless I missed person-address in the
> CLHS :-)

Looks like I missed the point. A trivial solution would be to enter

(defun person-address (person)
  (when (equal "Pascal" (person-name person)) "Europe"))

when it prompts for an input, but I'm sure there are better ways with CLOS
and MOP to modify the class of *pascal* on-the-fly and adding a new method.

-- 
Frank Buss, ··@frank-buss.de
http://www.frank-buss.de, http://www.it4-systems.de
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-FBC201.19320503082007@news-europe.giganews.com>
In article <·······························@40tude.net>,
 Frank Buss <··@frank-buss.de> wrote:

> Frank Buss wrote:
> 
> > Pascal Costanza wrote:
> > 
> >> (defclass person ()
> >>    ((name :initarg :name :reader person-name)))
> >> 
> >> (defvar *pascal* (make-instance 'person :name "Pascal"))
> >> 
> >> (defun test ()
> >>    (eval (read))
> >>    (print (person-name *pascal*))
> >>    (print (person-address *pascal*)))
> > 
> > This would even fail in Common Lisp, unless I missed person-address in the
> > CLHS :-)
> 
> Looks like I missed the point. A trivial solution would be to enter
> 
> (defun person-address (person)
>   (when (equal "Pascal" (person-name person)) "Europe"))
> 
> when it prompts for an input, but I'm sure there are better ways with CLOS
> and MOP to modify the class of *pascal* on-the-fly and adding a new method.

Let it read this one...

(progn
  (defclass person ()
    ((name :initarg :name :reader person-name)
     (address :initarg :name :reader person-address)))
  (setf (slot-value *pascal* 'address) "lambda road 1"))

-- 
http://lispm.dyndns.org
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hjaa5F3jq0cuU3@mid.individual.net>
Rainer Joswig wrote:
> In article <·······························@40tude.net>,
>  Frank Buss <··@frank-buss.de> wrote:
> 
>> Frank Buss wrote:
>>
>>> Pascal Costanza wrote:
>>>
>>>> (defclass person ()
>>>>    ((name :initarg :name :reader person-name)))
>>>>
>>>> (defvar *pascal* (make-instance 'person :name "Pascal"))
>>>>
>>>> (defun test ()
>>>>    (eval (read))
>>>>    (print (person-name *pascal*))
>>>>    (print (person-address *pascal*)))
>>> This would even fail in Common Lisp, unless I missed person-address in the
>>> CLHS :-)
>> Looks like I missed the point. A trivial solution would be to enter
>>
>> (defun person-address (person)
>>   (when (equal "Pascal" (person-name person)) "Europe"))
>>
>> when it prompts for an input, but I'm sure there are better ways with CLOS
>> and MOP to modify the class of *pascal* on-the-fly and adding a new method.
> 
> Let it read this one...
> 
> (progn
>   (defclass person ()
>     ((name :initarg :name :reader person-name)
>      (address :initarg :name :reader person-address)))
>   (setf (slot-value *pascal* 'address) "lambda road 1"))

If it reads only the defclass form, that's already enough - I can wait 
for the slot-unbound error to fix the contents of the respective slot in 
existing objects.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-AE3E4F.14404104082007@news-europe.giganews.com>
In article <···············@mid.individual.net>,
 Pascal Costanza <··@p-cos.net> wrote:

> Rainer Joswig wrote:
> > In article <·······························@40tude.net>,
> >  Frank Buss <··@frank-buss.de> wrote:
> > 
> >> Frank Buss wrote:
> >>
> >>> Pascal Costanza wrote:
> >>>
> >>>> (defclass person ()
> >>>>    ((name :initarg :name :reader person-name)))
> >>>>
> >>>> (defvar *pascal* (make-instance 'person :name "Pascal"))
> >>>>
> >>>> (defun test ()
> >>>>    (eval (read))
> >>>>    (print (person-name *pascal*))
> >>>>    (print (person-address *pascal*)))
> >>> This would even fail in Common Lisp, unless I missed person-address in the
> >>> CLHS :-)
> >> Looks like I missed the point. A trivial solution would be to enter
> >>
> >> (defun person-address (person)
> >>   (when (equal "Pascal" (person-name person)) "Europe"))
> >>
> >> when it prompts for an input, but I'm sure there are better ways with CLOS
> >> and MOP to modify the class of *pascal* on-the-fly and adding a new method.
> > 
> > Let it read this one...
> > 
> > (progn
> >   (defclass person ()
> >     ((name :initarg :name :reader person-name)
> >      (address :initarg :name :reader person-address)))
> >   (setf (slot-value *pascal* 'address) "lambda road 1"))
> 
> If it reads only the defclass form, that's already enough - I can wait 
> for the slot-unbound error to fix the contents of the respective slot in 
> existing objects.
> 
> 
> Pascal

Umh, right. Programming as a puzzle. Solving step by step... ;-)

-- 
http://lispm.dyndns.org
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-1AE40B.19261503082007@news-europe.giganews.com>
In article <······························@40tude.net>,
 Frank Buss <··@frank-buss.de> wrote:

> Pascal Costanza wrote:
> 
> > (defclass person ()
> >    ((name :initarg :name :reader person-name)))
> > 
> > (defvar *pascal* (make-instance 'person :name "Pascal"))
> > 
> > (defun test ()
> >    (eval (read))
> >    (print (person-name *pascal*))
> >    (print (person-address *pascal*)))
> 
> This would even fail in Common Lisp, unless I missed person-address in the
> CLHS :-)

Did you see the EVAL-READ?

-- 
http://lispm.dyndns.org
From: Frank Buss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <igi47g075dg6.vk1wptqmzskg$.dlg@40tude.net>
Rainer Joswig wrote:

> Did you see the EVAL-READ?

Yes, I missed this at first, maybe because it is not a very useful example. I
think a nice feature of Lisp is the interactive environment, which is
easier with dynamic typing, but using "eval" explicitly doesn't make sense
most of the time.

-- 
Frank Buss, ··@frank-buss.de
http://www.frank-buss.de, http://www.it4-systems.de
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hja86F3jq0cuU2@mid.individual.net>
Frank Buss wrote:
> Rainer Joswig wrote:
> 
>> Did you see the EVAL-READ?
> 
> Yes, I missed this first, maybe because it is not a very useful example. I
> think a nice feature of Lisp is the interactive environment, which is
> easier with dynamic typing, but using "eval" explicit doesn't make sense
> most of the time.

That's correct - but the purpose of my example is to illustrate a border 
case of something that a static type system will never be able to check but 
still produces behavior that can be considered reasonable under certain 
circumstances.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Chris F Clark
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <sddejijtev4.fsf@shell01.TheWorld.com>
Pascal Costanza <··@p-cos.net> writes:

>>> Did you see the EVAL-READ?

(eval (read)) is a very special case.  Many programs need the capacity
of being programmed and scripted.  And, I would generally agree that
it is better that they get scripted in lisp than in a half-designed,
poorly implemented dialect of some author's hacking.

However, many security flaws are opened by unrestricted scripting.  In
that sense, you don't actually want what you propose.  You really want
the user to have access to a much simpler language where they can't
make certain kinds of mistakes or exercise certain kinds of power (or
worse let some other untrusted party exercise certain kinds of power
under their authority).  I'm currently making my living trying to find
a way to close the security holes caused by just such unrestricted
scripting.

It is far too easy (and far too dangerous) to give unrestricted
scripting as an option.  So, when you really care, you don't actually
implement (eval (read)) but in fact force oneself to design a much
more limited solution that deals with only the real cases one needs
solved and does not put unrestricted power in the hands of those who
don't realize that they need to be careful.

Yes, it takes significant time (sometimes months or years) to design
such a solution, and is thus much slower than typing (eval (read)),
but in the long run it is usually the right solution.

In fact, even in terms of patching a live system, I'm not sure I want
(eval (read)) even in the hands of gurus.  Well, yes I want them to
have it.  However, when a system has been corrupted, other than
collecting the diagnostic information, for which being able to do (eval
(read)) can be indispensable, one wants to quickly isolate the system
and one cannot trust the answers it gives.  So, at the time when you need
the most help, you cannot get it.  Patching an uncorrupted system is
another matter, but an uncorrupted system should not fail (a bit of a
pipe-dream).

On the other side of this, my take on this discussion has been about
what professional programmers should do.  The point of static typing,
even in a prototype system, is to eliminate errors.  I think there are
ways of doing unrestricted prototyping where the static type system is
there to check one "at the end" when one is done prototyping and one
wants to be certain one hasn't missed something "obvious".  My whole
issue has been that most people are not careful and one-night scripts
evolve into real programs that get passed on and modified by even more
careless people, and what might be a good description in a textbook
or academic paper is not good enough for a long-lived solution.

What we don't have, and I don't know how to get, is a retroactive tool
that takes careless code and makes it sound.  If we had that, I
wouldn't complain about how anyone wrote code, I would just simply fix
it.  Having lived through some conversions of C++ code to make them
"const-correct", I'm not sanguine about how easy making code sound is.
The idea of liskell intrigues me though, perhaps one could use that to
evolve untyped lisp into typed code.

The hard part, and I've mentioned it before is trying to determine
from something that has no type information when something is an error
(it doesn't work for any case) or an impedance mismatch (it works for
some case but not the case you are trying and fixing it for your case
will break the other working case).
From: Andrew Reilly
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <pan.2007.08.05.04.45.05.946687@areilly.bpc-users.org>
On Sat, 04 Aug 2007 12:59:43 -0400, Chris F Clark wrote:

> The idea of liskell intrigues me though, perhaps one could use that to
> evolve untyped lisp into typed code.

There seems to be work going on in this direction in the PLT Scheme group.
There's a system of module contracts, and an experimental typed-scheme
(sub)language that I see discussed in a positive light on the mailing
list from time to time. Haven't used either myself, yet.

Cheers,

-- 
Andrew
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186226236.035944.301690@w3g2000hsg.googlegroups.com>
On 3 Aug., 18:38, Pascal Costanza <····@p-cos.net> wrote:
> Ingo Menger wrote:

> > It's like those FORTRAN IV programmers that
> > refuse(d*) to adapt to new languages because there was no FORMAT
> > statement, no computed GOTO and no COMMON block with EQUIVALENCE
> > clauses.
>
> These are straw men.

Not really. I've taken part in many "religious wars" over the last
decades and I have seen almost all the time that a particular language
(religion) has an influence on the way disciples of that religion
think about programming problems or tasks.
Thus, when being faced with a programming problem, a FORTRAN
programmer will conceptualize it in terms of COMMON blocks etc., a
perl programmer will think of regular expressions and hashes, a java
programmer will see an object modell and his holy "design patterns",
and so forth.

>
> If you can define a type system that lets the following program through,
> then I'm in. If you cannot make it acceptable, I am not interested.

>    (eval (read))

Just as if to illustrate my point: disciples of religions with eval,
for instance, tend to be faced with programming tasks that
absolutely need it. Whereas such almost never happens to the disciples
of the eval-missing religions. Understandably then, one of the
former, since 2/3 of his programs relied on eval, can not imagine
how to write useful programs in a language without eval.

Just as a side question: Does anybody know how many billions of
dollars have been spent in recent years to eliminate the C/C++/machine
code eval, also known as "buffer overrun"?
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <kd3ayzzfkb.fsf@hod.lan.m-e-leypold.de>
Ingo Menger wrote:

> Thus, when being faced with a programming problem, a FORTRAN
> programmer will conceptualize it in terms of COMMON blocks etc., a
> perl programmer will think of regular expressions and hashes, a java
> programmer will see an object modell and his holy "design patterns",
> and so forth.
         ^^^^^

A Forth programmer will probably conceptualize as "screens" and stack
frames.

Regards -- Markus
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186228594.782404.258540@57g2000hsv.googlegroups.com>
On 4 Aug., 13:48, ·····································@ANDTHATm-e-
leypold.de (Markus E.L. 2) wrote:
> Ingo Menger wrote:
> > Thus, when being faced with a programming problem, a FORTRAN
> > programmer will conceptualize it in terms of COMMON blocks etc., a
> > perl programmer will think of regular expressions and hashes, a java
> > programmer will see an object modell and his holy "design patterns",
> > and so forth.
>
>          ^^^^^
>
> A Forth programmer will probably concepualize as "screens" and stack
> frames.

:)
(But I think he'd rather think of vocabularies and words.)
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186239725.975746.181770@d30g2000prg.googlegroups.com>
> Just as if to illustrate my point: disciples of religions with eval,
> for instance, tend to being faced with programming tasks that
> absolutely need it. Whereas such almost never happens to the disciples
> of the eval missing religions. Understandably then, one of the
> formers, since 2/3 of his programms relied on eval, can not imagine
> how to write useful programs in a language without eval.

This cuts both ways. Programmers who do not have a feature in their
toolbox will unwittingly make awkward work-arounds to make up for the
absence. In the case of eval, people will write their own ad-hoc
interpreters and extension languages. In the case of "read", people
will write their own parsers. I've done both (on the same program!),
of course, which is why I recognized the utility of these features
when I learned Lisp.
From: Chris F Clark
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <sddir7vti06.fsf@shell01.TheWorld.com>
Rayiner Hashem <·······@gmail.com> writes:

>> Just as if to illustrate my point: disciples of religions with eval,
>> for instance, tend to being faced with programming tasks that
>> absolutely need it. Whereas such almost never happens to the disciples
>> of the eval missing religions. Understandably then, one of the
>> formers, since 2/3 of his programms relied on eval, can not imagine
>> how to write useful programs in a language without eval.
>
> This cuts both ways. Programmers who do not have a feature in their
> toolbox will unwittingly make awkward work-arounds to make up for the
> absence. In the case of eval, people will write their own ad-hoc
> interpreters and extension languages. In the case of "read", people
> will write their own parsers. I've done both (on the same program!),
> of course, which is why I recognized the utility of these features
> when I learned Lisp.

This is a good point.  People without eval will often write up poor
substitutes for eval.  I made a reasonable career in the parser
generator business.  That business exists (at least in part) because
people want to create a programmable widget--i.e. they want to (eval
(read)) somewhere in their program.

Creating your own language and your own eval is very hard.  People
rarely do it right.  As a result, on comp.compilers you will find that
the stock advice is to not write ones own scripting language and
instead use a standard already developed one.  I would much rather
have people scripting in an old dialect of lisp where one treats
everything as a list than using a broken interpreter for some half-
defined language.
From: Andrew Reilly
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <pan.2007.08.05.04.35.46.281526@areilly.bpc-users.org>
On Sat, 04 Aug 2007 11:51:53 -0400, Chris F Clark wrote:

> This is a good point.  People without eval will often write up poor
> substitutes for eval.  I made a reasonable career in the parser
> generator business.  That business exists (at least in part) because
> people want to create a programmable widget--i.e. they want to (eval
> (read)) somewhere in their program.
> 
> Creating your own language and your own eval is very hard.  People
> rarely do it right.  As a result, on comp.compilers you will find that
> the stock advice is to not write ones own scripting language and
> instead use a standard already developed one.  I would much rather
> have people scripting in an old dialect of lisp where one treats
> everything as a list than using a broken interpreter for some half-
> defined language.

I've seen this argument on many occasions.  Often enough to believe it,
even though I have done my own share of ad-hoc configuration language
creation.  So I'm looking into lisp (and scheme, which gets quite a bit of
press for that application), but there's a catch to using a full-spec
language in this situation that I haven't seen addressed, at least not
neatly:  How do you sandbox scripts to *limit* what they can do?  How does
one arrange for (eval (read)) to operate in an environment where, (for
example) only basic control and arithmetic elements exist: no file or
network access)?  I want my nice, full-featured scripting language, but I
want to severely limit its vocabulary.  Is that hard?

Cheers,

-- 
Andrew
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hls21F3jqui2U1@mid.individual.net>
Andrew Reilly wrote:
> On Sat, 04 Aug 2007 11:51:53 -0400, Chris F Clark wrote:
> 
>> This is a good point.  People without eval will often write up poor
>> substitutes for eval.  I made a reasonable career in the parser
>> generator business.  That business exists (at least in part) because
>> people want to create a programmable widget--i.e. they want to (eval
>> (read)) somewhere in their program.
>>
>> Creating your own language and your own eval is very hard.  People
>> rarely do it right.  As a result, on comp.compilers you will find that
>> the stock advice is to not write ones own scripting language and
>> instead use a standard already developed one.  I would much rather
>> have people scripting in an old dialect of lisp where one treats
>> everything as a list than using a broken interpreter for some half-
>> defined language.
> 
> I've seen this argument on many occasions.  Often enough to believe it,
> even though I have done my own share of ad-hoc configuration language
> creation.  So I'm looking into lisp (and scheme, which gets quite a bit of
> press for that application), but there's a catch to using a full-spec
> language in this situation that I haven't seen addressed, at least not
> neatly:  How do you sandbox scripts to *limit* what they can do?  How does
> one arrange for (eval (read)) to operate in an environment where, (for
> example) only basic control and arithmetic elements exist: no file or
> network access)?  I want my nice, full-featured scripting language, but I
> want to severely limit its vocabulary.  Is that hard?

You could probably use package or module systems for restricting access 
to the host language. Modifying the reader in Common Lisp should also be 
of help.

It's not straightforward, though, as far as I can tell.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <3uabt6acvg.fsf@hod.lan.m-e-leypold.de>
Pascal Costanza wrote:

>> It's like those FORTRAN IV programmers that
>> refuse(d*) to adapt to new languages because there was no FORMAT
>> statement, no computed GOTO and no COMMON block with EQUIVALENCE
>> clauses.
>
> These are straw men.

They can't be because they are no arguments, only opinions. Though I
agree with Ingo that the way the opponents of static typing here argue
reminds me of the FORTRAN IV programmers.

 - No understanding of the technology opposed

 - Desperately trying to construct one case where the new technology /
   restriction won't work and blowing that case up out of all
   proportion to the corner stone of their business case: "If we don't
   have computed goto, everything fails. Computed goto is an absolutely
   necessary requirement. There is no replacement. None at all." (And
   if there is, it's greenspunning).

> If you can define a type system that lets the following program
> through, then I'm in. If you cannot make it acceptable, I am not
> interested. [1]

BTW: I can help you to construct a case where a meaningful program
cannot be typed even in many of the more advanced static type systems:
The Y combinator. Indeed I'm wondering that nobody of the static
typing opponents hasn't pulled it out yet. Presumably because they
don't know enough about typing.

Regards -- Markus
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5hnqv7F3l18f0U1@mid.individual.net>
Markus E.L. 2 wrote:
> I can help you to construct a case where a meaningful program
> cannot be typed even in many of the more advanced static type systems:
> The Y combinator. Indeed I'm wondering that nobody of the static
> typing opponents hasn't pulled it out yet. Presumably because they
> don't know enough about typing.

Likewise, the dynamic typing opponents don't seem to know enough about 
the possibilities in dynamically typed languages.

>> If you can define a type system that lets the following program
>> through, then I'm in. If you cannot make it acceptable, I am not
>> interested. [1]
>> 
>> (defclass person ()
>>   ((name :initarg :name :reader person-name)))
>> 
>> (defvar *pascal* (make-instance 'person :name "Pascal"))
>> 
>> (defun test ()
>>   (eval (read))
>>   (print (person-name *pascal*))
>>   (print (person-address *pascal*)))
>>
> 
> Care to explain what the trick is? What the program is supposed to do?

See...



Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <e0zm150zwk.fsf@hod.lan.m-e-leypold.de>
Pascal Costanza wrote:

> Markus E.L. 2 wrote:
>> I can help you to construct a case where a meaningful program
>> cannot be typed even in many of the more advanced static type systems:
>> The Y combinator. Indeed I'm wondering that nobody of the static
>> typing opponents hasn't pulled it out yet. Presumably because they
>> don't know enough about typing.
>
> Likewise, the dynamic typing opponents don't seem to know enough about
> the possibilities in dynamically typed languages.

My good man, I hate to break it to you, but: A dynamically _typed_
language is actually a statically typed language with exactly one type
in it. Don't try to hide behind some kind of mysticism as if the
situation were exactly symmetric.

Regards -- Markus
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-79C4B1.13263606082007@news-europe.giganews.com>
In article <··············@hod.lan.m-e-leypold.de>,
 ·····································@ANDTHATm-e-leypold.de (Markus 
 E.L. 2) wrote:

> Pascal Costanza wrote:
> 
> > Markus E.L. 2 wrote:
> >> I can help you to construct a case where a meaningful program
> >> cannot be typed even in many of the more advanced static type systems:
> >> The Y combinator. Indeed I'm wondering that nobody of the static
> >> typing opponents hasn't pulled it out yet. Presumably because they
> >> don't know enough about typing.
> >
> > Likewise, the dynamic typing opponents don't seem to know enough about
> > the possibilities in dynamically typed languages.
> 
> My good man, I hate to break it to you, but: A dynamically _typed_
> language is actually a statically typed language with exactly one type
> in it. Don't try to hide between som kind of mysticism as if the
> situation where exactly symmetric.

Markus, I have a surprise for you: a dynamically typed language
is one that has types. But these types are not statically checked.
Still you can dynamically check the types.

Just look at Common Lisp. Execute the following in
your favorite implementation:

(type-of "string")

(type-of 1/3)

(string #(1 2 3))

(typep 100 '(integer 70 90))

> 
> Regards -- Markus

-- 
http://lispm.dyndns.org
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9773r$prp$1@news.xmission.com>
Rainer Joswig wrote:

[...]

> Markus, I have a surprise for you: a dynamically typed language
> is one that has types. But these types are not statically checked.
> Still you can dynamically check the types. [...]

What Markus is not explaining very well is that a good static type 
system allows the programmer to declare what's called a discriminated union:

http://en.wikipedia.org/wiki/Tagged_union

The "dynamic type system" of Scheme and Common Lisp is the equivalent of 
one single large discriminated union declaration in a static type 
system. There's nothing stopping an ML programmer from declaring one 
large discriminated union that contained every type handled by the 
Scheme or Lisp programming language, and thus turn ML into a 
"dynamically-typed" programming language.

Of course ML programmers don't do this. Instead, programs in ML will 
tend to have multiple small discriminated unions. A function can be 
written to take an argument of a type specified by a discriminated 
union, which effectively means that that function can take any one of 
the things inside that discriminated union. More than that, the compiler 
will guarantee that the function explicitly handles every case.

-thant
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <873aywzins.fsf@geddis.org>
Thant Tessman <···@standarddeviance.com> wrote on Mon, 06 Aug 2007:
> The "dynamic type system" of Scheme and Common Lisp is the equivalent of one
> single large discriminated union declaration in a static type system.

Yes, it can be viewed that way.  But now you've left out the run-time
"type exceptions" that dynamically typed languages do indeed throw.

You've basically just gotten at the difference between type safety and
static/dynamic type checking.  C is not type safe.  Lisp (a dynamically typed
language) and OCaml (a statically typed language) are type safe.

> There's nothing stopping an ML programmer from declaring one large
> discriminated union that contained every type handled by the Scheme or Lisp
> programming language, and thus turn ML into a "dynamically-typed"
> programming language.

Ah, but there is.  For one thing, the ML type checker will automatically
infer more limited types than desired by such a hypothetical ML programmer.
Even if you defined such a discriminated union, it's still the case that
when you 
        define F(x) = x + 1;
that ML will conclude F is a function from int->int, rather than a function
from Union->Union.  And ML will complain, and refuse to compile, a program
that tries to subsequently do
        F("hello!")

Whereas in Lisp, you can define the function F as above, call it with the
string, compile that code, run it, and get a runtime type exception generated
and wind up in the debugger (where you can patch the existing code and
continue).

You can't do that in ML, even if you wanted to.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
When my wife and I married we agreed that she would make all the unimportant
decisions, and I would make all the important decisions.  In 30 years of
marriage, there have been no important decisions.  -- Albert Einstein
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f97pfj$gfn$1@news.xmission.com>
Don Geddis wrote:
> Thant Tessman <···@standarddeviance.com> wrote on Mon, 06 Aug 2007:
>> The "dynamic type system" of Scheme and Common Lisp is the equivalent of one
>> single large discriminated union declaration in a static type system.
> 
> Yes, it can be viewed that way.  But now you've left out the run-time
> "type exceptions" that dynamically typed languages do indeed throw.

The ML version would throw a "type error" exception at runtime too. 
There really is no difference.

[...]

>> There's nothing stopping an ML programmer from declaring one large
>> discriminated union that contained every type handled by the Scheme or Lisp
>> programming language, and thus turn ML into a "dynamically-typed"
>> programming language.
> 
> Ah, but there is.  For one thing, the ML type checker will automatically
> infer more limited types than desired by such a hypothetical ML programmer.
> Even if you defined such a discriminated union, it's still that case that
> when you 
>         define F(x) = x + 1;
> that ML will conclude F is a function from int->int, rather than a function
> from Union->Union.

To program in a true dynamically-typed style, you would use a 
constructor around the one to build the instance of the Union. Then the 
type system would indeed infer Union->Union. Yes this is a syntactic 
inconvenience, but it is in no sense a programming style beyond the 
reach of SML.

[...]

-thant
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1pmyx32w8k.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:

> Ah, but there is.  For one thing, the ML type checker will automatically
> infer more limited types than desired by such a hypothetical ML programmer.

No.

> Even if you defined such a discriminated union, it's still that case that
> when you 
>         define F(x) = x + 1;
> that ML will conclude F is a function from int->int, rather than a function
> from Union->Union.  And ML will complain, and refuse to compile, a program
> that tries to subsequently do
>         F("hello!")

No.

You misunderstood / missed the point. Some OCaml session for your
elucidation (some responses have been elided):

   # type dynamic =

       Int    of int
           | Float  of float
       | String of string
       | Nil
       | Cons   of dynamic * dynamic
     ;;
                                           [...]
   # exception Dynamictype_error;;
                                           [...]
   # let increment x =
         match x with
           Int x -> Int (x+1)
         | _     -> raise Dynamictype_error

     ;;
   val increment : dynamic -> dynamic = <fun>      [!]

   # let foo = Int    7;;
   val foo : dynamic = Int 7
   # let bar = String "hello";;
   val bar : dynamic = String "hello"
   # increment foo;;
   - : dynamic = Int 8
   # increment bar;;
   Exception: Dynamictype_error.


- M
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <iwodhkirfj.fsf@hod.lan.m-e-leypold.de>
Thant Tessman wrote:

> Rainer Joswig wrote:
>
> [...]
>
>> Markus, I have a surprise for you: a dynamically typed language
>> is one that has types. But these types are not statically checked.
>> Still you can dynamically check the types. [...]
>
> What Markus is not explaining very well is that a good static type
> system allows the programmer to declare what's called a discriminated
> union:
>
> http://en.wikipedia.org/wiki/Tagged_union

I've not tried to explain. I just _mentioned_ it as a well known fact
(come on, people, rage against that) that from the point of view of a
static type theoretician a dynamic type system is just a type system
with one static type in it (and what they call types are actually the
constructors/discriminants of the variants).

Regards -- Markus
From: John Thingstad
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <op.twm3srtypqzri1@pandora.upc.no>
>>
>> My good man, I hate to break it to you, but: A dynamically _typed_
>> language is actually a statically typed language with exactly one type
>> in it. Don't try to hide between som kind of mysticism as if the
>> situation where exactly symmetric.
>
> Markus, I have a surprise for you: a dynamically typed language
> is one that has types. But these types are not statically checked.
> Still you can dynamically check the types.
>

A better way to put it is that objects have types, but variables do not.
A variable is just a placeholder to which you attach an object.
(Well actually for efficiency reasons this is not entirely true, but it  
will almost always behave as though it was. The exception being if  
variable type is declared or the object is an integer and the eq vs. eql.)
In fact I find it clumsy and arbitrary to restrict a variable to one  
type.
If you want to make sure the value stored in a variable is of a certain  
type the use check-type as in
(check-type seconds (integer 0 59))
For efficiency reasons you could add
(declare (type (fixnum seconds ...)))
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186407702.761431.251450@22g2000hsm.googlegroups.com>
On 6 Aug., 15:30, "John Thingstad" <··············@chello.no> wrote:
> >> My good man, I hate to break it to you, but: A dynamically _typed_
> >> language is actually a statically typed language with exactly one type
> >> in it. Don't try to hide between som kind of mysticism as if the
> >> situation where exactly symmetric.
>
> > Markus, I have a surprise for you: a dynamically typed language
> > is one that has types. But these types are not statically checked.
> > Still you can dynamically check the types.
>
> A better way to put it is that objects have types, but variables do not.
> A vaiable is just a plceholder on which you attach a object.
> (Well actually for efficiency reasons this is not entirely true, but it  
> will almost always behave as though it was. The exception being if  
> variable type is declared or the object is a integer and the eq vs. eql.)
> In fact I find it a clumbsy and arbitrary to restrict a variable to one  
> type.

This is a good start for another religious war about purity.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <i8k5s8irb1.fsf@hod.lan.m-e-leypold.de>
Ingo Menger wrote:

> On 6 Aug., 15:30, "John Thingstad" <··············@chello.no> wrote:
>> >> My good man, I hate to break it to you, but: A dynamically _typed_
>> >> language is actually a statically typed language with exactly one type
>> >> in it. Don't try to hide between som kind of mysticism as if the
>> >> situation where exactly symmetric.
>>
>> > Markus, I have a surprise for you: a dynamically typed language
>> > is one that has types. But these types are not statically checked.
>> > Still you can dynamically check the types.
>>
>> A better way to put it is that objects have types, but variables do not.
>> A vaiable is just a plceholder on which you attach a object.
>> (Well actually for efficiency reasons this is not entirely true, but it  
>> will almost always behave as though it was. The exception being if  
>> variable type is declared or the object is a integer and the eq vs. eql.)
>> In fact I find it a clumbsy and arbitrary to restrict a variable to one  
>> type.
>
> This is a good start for another religious war about purity.

Yes. I have the honor to bring this new war to you. Unfortunately I'm
now called away by more urgent business, but I will be certain to drop
by some centuries later and have a quick survey of the survivors.

Regards -- Markus
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2d4y0rc0e.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <··············@hod.lan.m-e-leypold.de>,
>  ·····································@ANDTHATm-e-leypold.de (Markus 
>  E.L. 2) wrote:
>
>> Pascal Costanza wrote:
>> 
>> > Markus E.L. 2 wrote:
>> >> I can help you to construct a case where a meaningful program
>> >> cannot be typed even in many of the more advanced static type systems:
>> >> The Y combinator. Indeed I'm wondering that nobody of the static
>> >> typing opponents hasn't pulled it out yet. Presumably because they
>> >> don't know enough about typing.
>> >
>> > Likewise, the dynamic typing opponents don't seem to know enough about
>> > the possibilities in dynamically typed languages.
>> 
>> My good man, I hate to break it to you, but: A dynamically _typed_
>> language is actually a statically typed language with exactly one type
>> in it. Don't try to hide between som kind of mysticism as if the
>> situation where exactly symmetric.
>
> Markus, I have a surprise for you: a dynamically typed language
> is one that has types. 

As I said: It's not fundamentally different from a statically typed
language where I work with a large union type for some purposes. The
advantages Pascal is touting are probably not those of "dynamically
typed languages" (there are none since I can always fall back on
dynamic typing in a statically typed language by defining appropriate
union types), but those of a "dynamic language" (in the sense I've
seen you using the expression first here on c.l.f).

> But these types are not statically checked.
> Still you can dynamically check the types.

That would actually be subtypes :-). Perhaps there is one cause for
the misunderstandings that have plagued this discussion: Subtypes are
often modeled as sub sets of the carrier sets of their base types
(like 0...12 might be a subtype of integer), but the "proper" types
are much simpler and mostly considered disjoint by the typing
algorithms (even if they actually overlap structurally). Subtyping
(which is more akin to a complete data flow analysis) is, I think,
more difficult than "simple" typing and has a relationship to theorem
proving: To drop a dynamic test in a binding or assignment the 

Perhaps that is where the sometimes false expectations on static type
systems stem from. I'm not sure. But the whole thread has given me the
impression that a number of participants is talking on cross purposes
and actually about quite different things.

> Just look at Common Lisp. Execute the following in
> your favorite implementation:
>
> (type-of "string")
>
> (type-of 1/3)
>
> (string #(1 2 3))
>
> (typep 100 '(integer 70 90))


So what is the point here? I suspect it's actually proving the point
I've been just trying to make. What is called type (dynamic type) in
Lisp is actually one of the variants of a union type in a static type
system. type-of just returns an identification of the constructor
(tag) used to construct the value.

Like in the following OCaml fragment


    type data    = MkString of string | MkRational of int * int;;

    type type_id = String | Rational 

    let type_of = function 

        MkString _     -> String
      | MkRational _   -> Rational
    ;;


There is much to be said and to be defended about the point of view
I'm proposing (especially concerning the allegation of 'greenspunning'
that will certainly be voiced) and I'm certain a number of vocal
opponents could be found. But I'd like to humbly put to consideration
the point that not having a dynamic type allows one to eliminate all
knowledge of the types in the virtual or actual machine to which one
is compiling. E.g. the CAML machine is quite poorer in types than the
language itself. That might be a disadvantage in some scenarios
(i.g. introspection and reflection) but an advantage in other cases
(as compiling to a native target so aggressively that one actually
uses any type information at the level of the generated program for
the intented target).

BTW: I've been rather impressed by Lisp over the years (as soon as I
get into the philosphy of the thing and try not to judge it as an
outsider), but I seriously doubt you can manufacture an argument
_against_ static type systems from the (supposed) success of
Lisp. What you can argue (but not only from Lisp, but also from
Python, even Perl and PHP) that static type systems are not strictly
necessary to get software running (and another aside Perl and PHP
software are usually classes more bug ridden than Lisp software: One
can try to speculate why this is so (to the advantage of Lisp :-),
whether it's the culture or the checking tools that _are_ actually
there in Lisp and sadly missing in Perl and PHP (the languages don't
lend themselves to writing third party tools easily, which in itself
is something to think about)).


Regards -- Markus
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-6FFC59.18341007082007@news-europe.giganews.com>
In article <·············@hod.lan.m-e-leypold.de>,
 ·····································@ANDTHATm-e-leypold.de (Markus 
 E.L. 2) wrote:

> So what is the point here? I suspect it's actually proving the point
> I've been just trying to make. What is called type (dynamic type) in
> Lisp is actually on of the variants of a unit type in a static type
> system. type-of just returns an identification of the constructor
> (tag) used to contruct the value.
> 
> Like in the following OCaml fragment
> 
> 
>     type data    = MkString of string | MkRational of int * int;;
> 
>     type type_id = String | Rational 
> 
>     let type_of = function 
> 
>         MkString _     -> String
>       | MkRational _   -> Rational
>     ;;
> 

That's emulation. You have to code. In a dynamic language it
is built-in. No code required.

> BTW: I've been rather impressed by Lisp over the years (as soon as I
> get into the philosphy of the thing and try not to judge it as an
> outsider), but I seriously doubt you can manufacture an argument
> _against_ static type systems from the (supposed) success of
> Lisp. What you can argue (but not only from Lisp, but also from
> Python, even Perl and PHP) that static type systems are not strictly
> necessary to get software running (and another aside Perl and PHP
> software are usually classes more bug ridden than Lisp software: One
> can try to speculate why this is so (to the advantage of Lisp :-),
> wether it's the culture or the checking tools that _are_ actually
> there in Lisp and sadly missing in Perl and PHP (the languages don't
> lend themselves to writing third party tools easily, which in itself
> is something to thing about)).

I guess PHP as a scripting language has a sloppy implementation in C.
It inherits lots of problems via poor language design
and poor implementation practices. The scripts were usually
short running and failure was difficult to debug anyway.

Lisp on the other hand is used in applications that
tend to run months or years. Implementors put effort
into robustness, there are runtime type checks, exception
handling, runtime checks for data access and more. Over
the time quite a few very demanding systems have been
developed - if those were failing every other hour
you bet that the underlying problems get fixed.

There was just mentioned a classic Lisp application:
American Express' 'Authorizer's assistant'. It checks
the more complex credit card transactions.  It is a
rule-based expert system with 35000 rules (2002).
This thing is now deployed for almost two decades (with some
change of deployment platform). It has very high traffic,
24x7 operation and a high business value.

 
> Regards -- Markus

-- 
http://lispm.dyndns.org
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <x0sl6vxp0l.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <·············@hod.lan.m-e-leypold.de>,
>  ·····································@ANDTHATm-e-leypold.de (Markus 
>  E.L. 2) wrote:
>
>> So what is the point here? I suspect it's actually proving the point
>> I've been just trying to make. What is called type (dynamic type) in
>> Lisp is actually on of the variants of a unit type in a static type
>> system. type-of just returns an identification of the constructor
>> (tag) used to contruct the value.
>> 
>> Like in the following OCaml fragment
>> 
>> 
>>     type data    = MkString of string | MkRational of int * int;;
>> 
>>     type type_id = String | Rational 
>> 
>>     let type_of = function 
>> 
>>         MkString _     -> String
>>       | MkRational _   -> Rational
>>     ;;
>> 
>
> That's emulation. You have to code. In a dynamic language it
> is built-in. No code required.

Yes. I didn't say it's built in. I only refuted Pascal's claim that
dynamic typing is fundamentally different and provides additional
features we poor static typers can't understand. We can.


> I guess PHP as a scripting language has a sloppy implementation in C.
> It inherits lots of problems via poor language design
> and poor implementation practices. The scripts were usually
> short running and failure was difficult to debug anyway.

One of the points where I'd completely agree with you.

> Lisp on the other hand is used in applications that
> tend to run months or years. Implementors put effort
> into robustness, there are runtime type checks, exception
> handling, runtime checks for data access and more. Over
> the time quite a few very demanding systems have been
> developed - if those were failing every other hour
> you bet that the underlying problems get fixed.

:-) Absolutely. IFF I needed a dynamic system (that is couldn't live
with what OCaml provides and would not want static typing) I almost
certainly would choose Lisp. 

BTW: Of the free implementations, which would you recommend.


Regards -- Markus 
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5hph1iF3k0tjtU1@mid.individual.net>
Markus E.L. 2 wrote:
> Pascal Costanza wrote:
> 
>> Markus E.L. 2 wrote:
>>> I can help you to construct a case where a meaningful program
>>> cannot be typed even in many of the more advanced static type systems:
>>> The Y combinator. Indeed I'm wondering that nobody of the static
>>> typing opponents hasn't pulled it out yet. Presumably because they
>>> don't know enough about typing.
>> Likewise, the dynamic typing opponents don't seem to know enough about
>> the possibilities in dynamically typed languages.
> 
> My good man, I hate to break it to you, but: A dynamically _typed_
> language is actually a statically typed language with exactly one type
> in it. Don't try to hide between som kind of mysticism as if the
> situation where exactly symmetric.

This is a fruitless aside.

You still don't know what is possible in dynamically typed languages.

If you prefer to nitpick about terminology rather than to learn 
something, that's your problem.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural     language Minim
Date: 
Message-ID: <f986g6$5j5$1@news.xmission.com>
Pascal Costanza wrote:
> Markus E.L. 2 wrote:
>> Pascal Costanza wrote:
>>
>>> Markus E.L. 2 wrote:
>>>> I can help you to construct a case where a meaningful program
>>>> cannot be typed even in many of the more advanced static type systems:
>>>> The Y combinator. Indeed I'm wondering that nobody of the static
>>>> typing opponents hasn't pulled it out yet. Presumably because they
>>>> don't know enough about typing.
>>> Likewise, the dynamic typing opponents don't seem to know enough about
>>> the possibilities in dynamically typed languages.
>>
>> My good man, I hate to break it to you, but: A dynamically _typed_
>> language is actually a statically typed language with exactly one type
>> in it. Don't try to hide between som kind of mysticism as if the
>> situation where exactly symmetric.
> 
> This is a fruitless aside.
> 
> You still don't know what is possible in dynamically typed languages.

There are folks on both sides of this debate who aren't speaking from a 
position of experience. Don't know about Markus in particular, but I've 
programmed in Scheme more than I've programmed in SML. I've even done a 
bit of macro programming:

    http://www.standarddeviance.com/p45-tessman.pdf

Anyway, in this particular case, Markus' point is a valid one. Building 
and using such a universal type and its associated infrastructure would 
  be awkward, and as a practical matter it would not at all lend itself 
to macro programming. But static type systems do not preclude 
programming in a "dynamically-typed" style. What it would do is force 
the programmer to acknowledge in the code where "dynamic type errors" 
are allowed to happen. If there is any Don Geddis cost associated with 
static type systems as such, this is it.

-thant
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-0BE6B2.00231407082007@news-europe.giganews.com>
In article <············@news.xmission.com>,
 Thant Tessman <···@standarddeviance.com> wrote:

> Pascal Costanza wrote:
> > Markus E.L. 2 wrote:
> >> Pascal Costanza wrote:
> >>
> >>> Markus E.L. 2 wrote:
> >>>> I can help you to construct a case where a meaningful program
> >>>> cannot be typed even in many of the more advanced static type systems:
> >>>> The Y combinator. Indeed I'm wondering that nobody of the static
> >>>> typing opponents hasn't pulled it out yet. Presumably because they
> >>>> don't know enough about typing.
> >>> Likewise, the dynamic typing opponents don't seem to know enough about
> >>> the possibilities in dynamically typed languages.
> >>
> >> My good man, I hate to break it to you, but: A dynamically _typed_
> >> language is actually a statically typed language with exactly one type
> >> in it. Don't try to hide between som kind of mysticism as if the
> >> situation where exactly symmetric.
> > 
> > This is a fruitless aside.
> > 
> > You still don't know what is possible in dynamically typed languages.
> 
> There are folks on both sides of this debate who aren't speaking from a 
> position of experience. Don't know about Markus in particular, but I've 
> programmed in Scheme more than I've programmed in SML. I've even done a 
> bit of macro programming:
> 
>     http://www.standarddeviance.com/p45-tessman.pdf
> 
> Anyway, in this particular case, Markus' point is a valid one. Building 
> and using such a universal type and its associated infrastructure would 
>   be awkward, and as a practical matter it would not at all lend itself 
> to macro programming. But static type systems do not preclude 
> programming in a "dynamically-typed" style. What it would do is force 
> the programmer to acknowledge in the code where "dynamic type errors" 
> are allowed to happen. If there is any Don Geddis cost associated with 
> static type systems as such, this is it.
> 
> -thant


He said: 

"A dynamically _typed_ language is actually a statically typed language with
exactly one type in it."

Which is wrong. A dynamically typed language is about
dynamic types at runtime. A statically typed language
does not need types at runtime. Some have. Others have not.
Optional. Not so with dynamically typed languages.
There, runtime types are not optional; they are required.
Also the nature of static types and dynamic types is
different. Dynamic types are attached to values. You
can pass the value around. The type stays the same.

-- 
http://lispm.dyndns.org
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f987kr$l8s$1@news.xmission.com>
Rainer Joswig wrote:

[...]

> He said: 
> 
> "A dynamically _typed_ language is actually a statically typed language with
> exactly one type in it."
> 
> Which is wrong. 

If you want to pick nits, it's a single discriminated union type (a.k.a. 
datatype) containing multiple atomic types. (The types within a 
discriminated union don't have to be atomic types, but in this case they 
would be.)


> A dynamically typed language is about
> dynamic types at runtime.

So are discriminated unions.


> A statically typed language
> does not need types at runtime.

In the case of discriminated unions, it does. There is a type tag 
checked at runtime. As I said, the difference is that in the case of a 
statically-typed system (more properly it's called a "type system" 
without the "static") is that the compiler will verify that your code 
handles all the possible cases. You can explicitly *not* handle cases 
(in which case an exception is thrown in the not-handled cases), but it 
is explicit.

[...]

-thant
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-B222A6.01161607082007@news-europe.giganews.com>
In article <············@news.xmission.com>,
 Thant Tessman <···@standarddeviance.com> wrote:

> Rainer Joswig wrote:
> 
> [...]
> 
> > He said: 
> > 
> > "A dynamically _typed_ language is actually a statically typed language with
> > exactly one type in it."
> > 
> > Which is wrong. 
> 
> If you want to pick nits, it's a single discriminated union type (a.k.a. 
> datatype) containing multiple atomic types.

Would I have to list the types upfront? Does the order matter?

> (The types within a 
> discriminated union don't have to be atomic types, but in this case they 
> would be.)
> 
> 
> > A dynamically typed language is about
> > dynamic types at runtime.
> 
> So are discriminated unions.
> 
> 
> > A statically typed language
> > does not need types at runtime.
> 
> In the case of discriminated unions, it does. There is a type tag 
> checked at runtime. As I said, the difference is that in the case of a 
> statically-typed system (more properly it's called a "type system" 
> without the "static") is that the compiler will verify that your code 
> handles all the possible cases. You can explicitly *not* handle cases 
> (in which case an exception is thrown in the not-handled cases), but it 
> is explicit.
> 
> [...]
> 
> -thant

-- 
http://lispm.dyndns.org
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f98c3m$a9b$1@news.xmission.com>
Rainer Joswig wrote:
> In article <············@news.xmission.com>,
>  Thant Tessman <···@standarddeviance.com> wrote:
> 
>> Rainer Joswig wrote:
>>
>> [...]
>>
>>> He said: 
>>>
>>> "A dynamically _typed_ language is actually a statically typed language with
>>> exactly one type in it."
>>>
>>> Which is wrong. 
>> If you want to pick nits, it's a single discriminated union type (a.k.a. 
>> datatype) containing multiple atomic types.
> 
> Would I have to list the types upfront? does the order matter?

Yes, you would have to include one type declaration up front describing 
the members of the type union. No, the order doesn't matter.

More than that, you'd have to define all the 'built-in' functions as 
well if you were really trying to fill in all of Scheme (minus the 
macros). That is, for example, you'd have to redefine the '+' operator 
to take values of your union type and do the right thing based on the 
actual types of the values. (You can use wildcards for a catchall case, 
which would throw an exception to signal a runtime type error.)

But it wouldn't be difficult. It might even be kinda fun just to make 
the point, or as an exercise to understand the semantics of Scheme 
better. Common Lisp would probably be a bit trickier what with its 
object system.

The point is that the type system in itself doesn't preclude a 
"dynamic-typing" style of programming.

[...]

-thant
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-E44DAD.02462407082007@news-europe.giganews.com>
In article <············@news.xmission.com>,
 Thant Tessman <···@standarddeviance.com> wrote:

> Rainer Joswig wrote:
> > In article <············@news.xmission.com>,
> >  Thant Tessman <···@standarddeviance.com> wrote:
> > 
> >> Rainer Joswig wrote:
> >>
> >> [...]
> >>
> >>> He said: 
> >>>
> >>> "A dynamically _typed_ language is actually a statically typed language with
> >>> exactly one type in it."
> >>>
> >>> Which is wrong. 
> >> If you want to pick nits, it's a single discriminated union type (a.k.a. 
> >> datatype) containing multiple atomic types.
> > 
> > Would I have to list the types upfront? does the order matter?
> 
> Yes, you would have to include one type declaration up front describing 
> the members of the type union. No, the order doesn't matter.

Given that it is a finite number of types we know upfront. Which
isn't the case with CL, for example.

> More than that, you'd have to define all the 'built-in' functions as 
> well if you were really trying to fill in all of Scheme (minus the 
> macros). That is, for example, you'd have to redefine the '+' operator 
> to take values of your union type and do the right thing based on the 
> actual types of the values. (You can use wildcards for a catchall case, 
> which would throw an exception to signal a runtime type error.)

Okay, if we have done that. Variables and functions all take
now the 'one discriminated union type'. Would we still have a
'statically typed language'? Do we have it reduced to a trivial
case?
 
> But it wouldn't be difficult. It might even be kinda fun just to make 
> the point, or as an exercise to understand the semantics of Scheme 
> better. Common Lisp would probably be a bit trickier what with its 
> object system.
> 
> The point is that the type system in itself doesn't preclude a 
> "dynamic-typing" style of programming.

Right, that was not on debate. The point was that a statically
typed language with exactly one type is not automatically a
dynamically typed language.

> 
> [...]
> 
> -thant
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f99osa$fn9$1@news.xmission.com>
Rainer Joswig wrote:
> In article <············@news.xmission.com>,
>  Thant Tessman <···@standarddeviance.com> wrote:

[...]

>> Yes, you would have to include one type declaration up front describing 
>> the members of the type union. No, the order doesn't matter.
> 
> Given that it is a finite number of types we know upfront. Which
> isn't the case with CL, for example.

You would have to include some type that took into account types built 
at runtime. That's why I mentioned that accounting for Common Lisp's 
object system would make things more complicated.


> Okay, if we have done that. Variables and functions all take
> now the 'one discriminated union type'. Would we still have a
> 'statically typed language'? Do we have it reduced to a trivial
> case?

It's a bit of a philosophical question, but I don't see why not.


>> The point is that the type system in itself doesn't preclude a 
>> "dynamic-typing" style of programming.
> 
> Right, that was not on debate. The point was that a statically
> typed language with exactly one type is not automatically a
> dynamically typed language.

I don't see how the difference is important. If we stick to the use of 
our one all-encompassing type and associated infrastructure, we are 
programming in a dynamically-typed language.

When I honestly put some thought into the advantages of Scheme over SML, 
I think of things like macros and what I've done with them in the past. 
Scheme is a very malleable language. The thing is, its malleability is 
only indirectly related to its dynamically-typedness, and the stuff that 
I did build was either stuff whose value I'm not so sure of anymore 
(like object systems), or stuff that fits in just fine in a 
statically-typed setting (like language-level threading).


-thant
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <b7sl6vz4m0.fsf@hod.lan.m-e-leypold.de>
Thant Tessman wrote:

> When I honestly put some thought into the advantages of Scheme over
> SML, I think of things like macros and what I've done with them in the
> past. Scheme is a very malleable language. The thing is, it's
> malleability is only indirectly related to its dynamically-typedness,


> and the stuff that I did build was either stuff whose value I'm not so
> sure of anymore (like object systems), or stuff that fits in just fine

I also have already written largish macro systems in scheme (too early
probably) and the "whose value I'm not so sure of anymore"
expresses really nicely how I feel about them now. I don't say this
to denigrate Scheme. Just that I know some other people turned away
from Scheme whose attitude has been quite similar.

> in a statically-typed setting (like language-level threading).

Regards -- Markus
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f99ov8$g0l$1@news.xmission.com>
Rainer Joswig wrote:
> In article <············@news.xmission.com>,
>  Thant Tessman <···@standarddeviance.com> wrote:

[...]

>> Yes, you would have to include one type declaration up front describing 
>> the members of the type union. No, the order doesn't matter.
> 
> Given that it is a finite number of types we know upfront. Which
> isn't the case with CL, for example.

You would have to include some type that took into account types built 
at runtime. That's why I mentioned that accounting for Common Lisp's 
object system would make things more complicated.


> Okay, if we have done that. Variables and functions all take
> now the 'one discriminated union type'. Would we still have a
> 'statically typed language'? Do we have it reduced to a trivial
> case?

It's a bit of a philosophical question, but I don't see why not.


>> The point is that the type system in itself doesn't preclude a 
>> "dynamic-typing" style of programming.
> 
> Right, that was not on debate. The point was that a statically
> typed language with exactly one type is not automatically a
> dynamically typed language.

I don't see how the difference is important. If we stick to the use of 
our one all-encompassing type and associated infrastructure, we are 
programming in a dynamically-typed language.

When I honestly put some thought into the advantages of Scheme over SML, 
I think of things like macros and what I've done with them in the past. 
Scheme is a very malleable language. The thing is, its malleability is 
only indirectly related to its dynamically-typedness, and the stuff that 
I did build was either stuff whose value I'm not so sure of anymore 
(like object systems), or stuff that fits in just fine in a 
statically-typed setting (like language-level threading).


-thant
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <vpwsw7z4ry.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <············@news.xmission.com>,
>  Thant Tessman <···@standarddeviance.com> wrote:
>
>> Rainer Joswig wrote:
>> > In article <············@news.xmission.com>,
>> >  Thant Tessman <···@standarddeviance.com> wrote:
>> > 
>> >> Rainer Joswig wrote:
>> >>
>> >> [...]
>> >>
>> >>> He said: 
>> >>>
>> >>> "A dynamically _typed_ language is actually a statically typed language with
>> >>> exactly one type in it."
>> >>>
>> >>> Which is wrong. 
>> >> If you want to pick nits, it's a single discriminated union type (a.k.a. 
>> >> datatype) containing multiple atomic types.
>> > 
>> > Would I have to list the types upfront? does the order matter?
>> 
>> Yes, you would have to include one type declaration up front describing 
>> the members of the type union. No, the order doesn't matter.
>
> Given that it is a finite number of types we know upfront. Which
> isn't the case with CL, for example.
>
>> More than that, you'd have to define all the 'built-in' functions as 
>> well if you were really trying to fill in all of Scheme (minus the 
>> macros). That is, for example, you'd have to redefine the '+' operator 
>> to take values of your union type and do the right thing based on the 
>> actual types of the values. (You can use wildcards for a catchall case, 
>> which would throw an exception to signal a runtime type error.)
>
> Okay, if we have done that. Variables and functions all take
> now the 'one discriminated union type'. Would we still have a
> 'statically typed language'? Do we have it reduced to a trivial
> case?

No, we would have a dynamically typed subsystem. _IN_ a statically
typed language. This was only to make the point that dynamic typing
might be considered a special case of static typing, not necessarily
as an extension.

>> But it wouldn't be difficult. It might even be kinda fun just to make 
>> the point, or as an exercise to understand the semantics of Scheme 
>> better. Common Lisp would probably be a bit trickier what with its 
>> object system.
>> 
>> The point is that the type system in itself doesn't preclude a 
>> "dynamic-typing" style of programming.
>
> Right, that was not on debate. The point was that a statically
> typed language with exactly one type is not automatically a
> dynamically typed language.

No, but a dynamically typed language is a statically typed language
with one big union type and syntactic sugar for the literals.

So, there.

Regards -- Markus
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5hrpqiF3lo5j5U2@mid.individual.net>
Markus E.L. 2 wrote:

> No, we would have a dynamically typed subsytem. _IN_ a statically
> typed language. This was only to make the point that dyankic typing
> might be considered a special case of static typeing, not necessarily
> as an extension.

A dynamically typed language as a special case of a statically typed one 
will only have the features that the statically typed one can deal with. 
Ergo, it's not expressive enough for the kinds of things one may be 
interested in.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bjea22i90iqe2@corp.supernews.com>
Pascal Costanza wrote:
> Markus E.L. 2 wrote:
>> No, we would have a dynamically typed subsytem. _IN_ a statically
>> typed language. This was only to make the point that dyankic typing
>> might be considered a special case of static typeing, not necessarily
>> as an extension.
> 
> A dynamically typed language as a special case of a statically typed one
> will only have the features that the statically typed one can deal with.
> Ergo, it's not expressive enough for the kinds of things one may be
> interested in.

You are assuming that it would be less expressive.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5htts3F3lnt6gU1@mid.individual.net>
Jon Harrop wrote:
> Pascal Costanza wrote:
>> Markus E.L. 2 wrote:
>>> No, we would have a dynamically typed subsytem. _IN_ a statically
>>> typed language. This was only to make the point that dyankic typing
>>> might be considered a special case of static typeing, not necessarily
>>> as an extension.
>> A dynamically typed language as a special case of a statically typed one
>> will only have the features that the statically typed one can deal with.
>> Ergo, it's not expressive enough for the kinds of things one may be
>> interested in.
> 
> You are assuming that it would be less expressive.

Excellent observation.

Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bkb1fngdes0f0@corp.supernews.com>
Pascal Costanza wrote:
> Jon Harrop wrote:
>> You are assuming that it would be less expressive.
> 
> Excellent observation.

Your assumption is wrong.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bjecmc64rf7e9@corp.supernews.com>
Markus E.L. 2 wrote:
> No, but a dynamically typed language is a statically typed language
> with one big union type and syntactic sugar for the literals.

Isn't it an untyped language with an extensible tagged union type and
syntactic sugar for the literals?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <uld4xzz4fz.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <············@news.xmission.com>,
>  Thant Tessman <···@standarddeviance.com> wrote:
>
>> Rainer Joswig wrote:
>> > In article <············@news.xmission.com>,
>> >  Thant Tessman <···@standarddeviance.com> wrote:
>> > 
>> >> Rainer Joswig wrote:
>> >>
>> >> [...]
>> >>
>> >>> He said: 
>> >>>
>> >>> "A dynamically _typed_ language is actually a statically typed language with
>> >>> exactly one type in it."
>> >>>
>> >>> Which is wrong. 
>> >> If you want to pick nits, it's a single discriminated union type (a.k.a. 
>> >> datatype) containing multiple atomic types.
>> > 
>> > Would I have to list the types upfront? does the order matter?
>> 
>> Yes, you would have to include one type declaration up front describing 
>> the members of the type union. No, the order doesn't matter.
>
> Given that it is a finite number of types we know upfront. Which
> isn't the case with CL, for example.

And how does the CL runtime do it? Basically by

   type type_tag = int

   let new_type_tag = let x = ref 0 in fun () -> x := !x+1

   let dynamic_type =
       ...

     | Abstract_Type of type_tag * dynamic_type      
     | ...


   

I'd bet (Yes this can be extended, but the principle stays the same).

- M
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ry1wef1fa0.fsf@hod.lan.m-e-leypold.de>
Thant Tessman wrote:

> Rainer Joswig wrote:
>> In article <············@news.xmission.com>,
>>  Thant Tessman <···@standarddeviance.com> wrote:
>>
>>> Rainer Joswig wrote:
>>>
>>> [...]
>>>
>>>> He said:
>>>>
>>>> "A dynamically _typed_ language is actually a statically typed language with
>>>> exactly one type in it."
>>>>
>>>> Which is wrong.
>>> If you want to pick nits, it's a single discriminated union type
>>> (a.k.a. datatype) containing multiple atomic types.
>> Would I have to list the types upfront? does the order matter?
>
> Yes, you would have to include one type declaration up front
> describing the members of the type union. No, the order doesn't matter.
>
> More than that, you'd have to define all the 'built-in' functions as
> well if you were really trying to fill in all of Scheme (minus the
> macros). That is, for example, you'd have to redefine the '+' operator
> to take values of your union type and do the right thing based on the
> actual types of the values. (You can use wildcards for a catchall
> case, which would throw an exception to signal a runtime type error.)

Yeah, but this can all be put in one dynamic runtime which can be
reused for every program. But that is not the point: I initially opposed
the claim that "dynamic typing" is fundamentally different from static
typing (so I, presumably only knowing static typing, wouldn't know
about it). Instead of pulling out various experiences in dynamically
typed languages (which would be met with: But Lisp has even more of
...) I preferred to take the point that static typing actually
encompasses dynamic typing as a special case (and some syntactic sugar
for interfacing with the run time, like compiling "hello" as something
that would be (String "hello") in the statically typed language).

> But it wouldn't be difficult. It might even be kinda fun just to make
> the point, or as an exercise to understand the semantics of Scheme
> better. 

I'm already doing it when I find the time. The purpose is to embed a
scheme-like interpreter in OCaml, put a non-Scheme syntax surface (or
more than one) on it to have a simple and general method of parsing
configuration files.


> Common Lisp would probably be a bit trickier what with its
> object system.
>
> The point is that the type system in itself doesn't preclude a
> "dynamic-typing" style of programming.

Exactly. And so we static typers already know a lot about dynamic
typing ... 

Regards -- Markus

>
> [...]
>
> -thant
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <c57io71fnx.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <············@news.xmission.com>,
>  Thant Tessman <···@standarddeviance.com> wrote:
>
>> Rainer Joswig wrote:
>> 
>> [...]
>> 
>> > He said: 
>> > 
>> > "A dynamically _typed_ language is actually a statically typed language with
>> > exactly one type in it."
>> > 
>> > Which is wrong. 
>> 
>> If you want to pick nits, it's a single discriminated union type (a.k.a. 
>> datatype) containing multiple atomic types.
>
> Would I have to list the types upfront? does the order matter?

No. You'd do 

  open DynamicTypes;;

which just would have one long definition like

  type dynamic =

      Int    of int
    | Float  of float
    | String of string
    | Nil
    | Cons   of dynamic * dynamic
    | ...
  ;;

which is the same for every program.

Regards -- Markus
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <b81wefz425.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <············@news.xmission.com>,
>  Thant Tessman <···@standarddeviance.com> wrote:
>
>> Pascal Costanza wrote:
>> > Markus E.L. 2 wrote:
>> >> Pascal Costanza wrote:
>> >>
>> >>> Markus E.L. 2 wrote:
>> >>>> I can help you to construct a case where a meaningful program
>> >>>> cannot be typed even in many of the more advanced static type systems:
>> >>>> The Y combinator. Indeed I'm wondering that nobody of the static
>> >>>> typing opponents hasn't pulled it out yet. Presumably because they
>> >>>> don't know enough about typing.
>> >>> Likewise, the dynamic typing opponents don't seem to know enough about
>> >>> the possibilities in dynamically typed languages.
>> >>
>> >> My good man, I hate to break it to you, but: A dynamically _typed_
>> >> language is actually a statically typed language with exactly one type
>> >> in it. Don't try to hide between som kind of mysticism as if the
>> >> situation where exactly symmetric.
>> > 
>> > This is a fruitless aside.
>> > 
>> > You still don't know what is possible in dynamically typed languages.
>> 
>> There are folks on both sides of this debate who aren't speaking from a 
>> position of experience. Don't know about Markus in particular, but I've 
>> programmed in Scheme more than I've programmed in SML. I've even done a 
>> bit of macro programming:
>> 
>>     http://www.standarddeviance.com/p45-tessman.pdf
>> 
>> Anyway, in this particular case, Markus' point is a valid one. Building 
>> and using such a universal type and its associated infrastructure would 
>>   be awkward, and as a practical matter it would not at all lend itself 
>> to macro programming. But static type systems do not preclude 
>> programming in a "dynamically-typed" style. What it would do is force 
>> the programmer to acknowledge in the code where "dynamic type errors" 
>> are allowed to happen. If there is any Don Geddis cost associated with 
>> static type systems as such, this is it.
>> 
>> -thant
>
>
> He said: 
>
> "A dynamically _typed_ language is actually a statically typed language with
> exactly one type in it."
>
> Which is wrong. A dynamically typed language is about
> dynamic types at runtime. A statically typed language
> does not need types at runtime. Some have. Others have not.
> Optional. Not so with dynamically typed languages.
> There runtime types are not optional. It is required.
> Also the nature of static types and dynamic types is
> different. Dynamic types are attached to values. You
> can pass the value around. The type stays the same.

I think I've refuted that sufficiently in my other posts. See there.

Regards -- Markus
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural       language Minim
Date: 
Message-ID: <5hqgm0F3lcpvbU1@mid.individual.net>
Thant Tessman wrote:
> Pascal Costanza wrote:
>> Markus E.L. 2 wrote:
>>> Pascal Costanza wrote:
>>>
>>>> Markus E.L. 2 wrote:
>>>>> I can help you to construct a case where a meaningful program
>>>>> cannot be typed even in many of the more advanced static type systems:
>>>>> The Y combinator. Indeed I'm wondering that nobody of the static
>>>>> typing opponents hasn't pulled it out yet. Presumably because they
>>>>> don't know enough about typing.
>>>> Likewise, the dynamic typing opponents don't seem to know enough about
>>>> the possibilities in dynamically typed languages.
>>>
>>> My good man, I hate to break it to you, but: A dynamically _typed_
>>> language is actually a statically typed language with exactly one type
>>> in it. Don't try to hide between som kind of mysticism as if the
>>> situation where exactly symmetric.
>>
>> This is a fruitless aside.
>>
>> You still don't know what is possible in dynamically typed languages.
> 
> There are folks on both sides of this debate who aren't speaking from a 
> position of experience. Don't know about Markus in particular, but I've 
> programmed in Scheme more than I've programmed in SML. I've even done a 
> bit of macro programming:
> 
>    http://www.standarddeviance.com/p45-tessman.pdf
> 
> Anyway, in this particular case, Markus' point is a valid one. Building 
> and using such a universal type and its associated infrastructure would 
>  be awkward, and as a practical matter it would not at all lend itself 
> to macro programming. But static type systems do not preclude 
> programming in a "dynamically-typed" style. What it would do is force 
> the programmer to acknowledge in the code where "dynamic type errors" 
> are allowed to happen. If there is any Don Geddis cost associated with 
> static type systems as such, this is it.

This is still a fruitless aside. I have shown a code fragment in a 
dynamically typed language which I am convinced cannot be statically 
type checked. And now you're dancing around this example arguing about 
terminology and the ability to simulate dynamic typing in a statically 
typed language without ever getting to the heart of the issue.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xy7gnvobx.fsf@ruckus.brouhaha.com>
Pascal Costanza <··@p-cos.net> writes:
> This is still a fruitless aside. I have shown a code fragment in a
> dynamically typed language which I am convinced cannot be statically
> type checked. 

You mean this?

     (defun test ()
       (eval (read))
       (print (person-name *pascal*))
       (print (person-address *pascal*)))

> And now you're dancing around this example arguing about terminology
> and the ability to simulate dynamic typing in a statically typed
> language without ever getting to the heart of the issue.

Er, the heart of the issue is exactly what?  If that simulation were
worth using, the static compiler could be made to generate pretty much
the same code that a Lisp compiler would generate (I mean with tagged
pointers and all that) instead of the more typical representation with
cons nodes, if you're moaning about runtime overhead.
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hql7qF3l3gbtU1@mid.individual.net>
Paul Rubin wrote:
> Pascal Costanza <··@p-cos.net> writes:
>> This is still a fruitless aside. I have shown a code fragment in a
>> dynamically typed language which I am convinced cannot be statically
>> type checked. 
> 
> You mean this?
> 
>      (defun test ()
>        (eval (read))
>        (print (person-name *pascal*))
>        (print (person-address *pascal*)))

Yes, the issue here is that a static type checker would have to reject 
the call to person-address because it is not defined at compile time. In 
a dynamically typed language, this program may still succeed.

I will explain later how this works and why this is useful.

>> And now you're dancing around this example arguing about terminology
>> and the ability to simulate dynamic typing in a statically typed
>> language without ever getting to the heart of the issue.
> 
> Er, the heart of the issue is exactly what?  If that simulation were
> worth using, the static compiler could be made generate pretty much
> the same code that a Lisp compiler would generate (I mean with tagged
> pointers and all that) instead of the more typical representation with
> cons nodes, if you're moaning about runtime overhead.

Sure, you can simulate dynamic typing in a statically typed language. 
But you don't get the features that are possible because of dynamic 
typing, unless you reimplement an interpreter for a full-fledged 
dynamically typed language. In other words, this is turning into a 
Turing-equivalence argument.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xvebr945k.fsf@ruckus.brouhaha.com>
Pascal Costanza <··@p-cos.net> writes:
> >        (eval (read))
> >        (print (person-name *pascal*))
> >        (print (person-address *pascal*)))
> 
> Yes, the issue here is that a static type checker would have to reject
> the call to person-address because it is not defined at compile time. 

Oh, I see, the eval has a defun in it.  I'm not impressed; do you
really do that often enough that you're going to hurt your fingers by
entering a type annotation in a situation like that?  (Actually I'm
going to have to hope some ML expert weighs in on whether it could
work.  Basically the eval would have to generate and load an external
module, which I think ML can do in some typesafe way).

And how are you going to do that in Scheme, which doesn't have eval?
(Well it didn't back in the day, I'm not sure about now).

> Sure, you can simulate dynamic typing in a statically typed
> language. But you don't get the features that are possible because of
> dynamic typing, unless you reimplement an interpreter for a
> full-fledged dynamically typed language. In other words, this is
> turning into a Turing-equivalence argument.

No really, it's still the same language, fully compiled by the same
compiler, maybe with some compiler optimizations added to help deal
with a style that static programmers normally don't use, and maybe
with some syntax sugar to avoid some excess annotations at the bottom
level.  
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hrr4jF3m04h0U2@mid.individual.net>
Paul Rubin wrote:
> Pascal Costanza <··@p-cos.net> writes:
>>>        (eval (read))
>>>        (print (person-name *pascal*))
>>>        (print (person-address *pascal*)))
>> Yes, the issue here is that a static type checker would have to reject
>> the call to person-address because it is not defined at compile time. 
> 
> Oh, I see, the eval has a defun in it.  I'm not impressed; do you
> really do that often enough that you're going to hurt your fingers by
> entering a type annotation in a situation like that?  (Actually I'm
> going to have to hope some ML expert weighs in on whether it could
> work.  Basically the eval would have to generate and load an external
> module, which I think ML can do in some typesafe way).
> 
> And how are you going to do that in Scheme, which doesn't have eval?
> (Well it didn't back in the day, I'm not sure about now).

R5RS Scheme does. I am not sure how powerful it is, though.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f99pmk$p00$1@news.xmission.com>
Pascal Costanza wrote:
> Paul Rubin wrote:
>> Pascal Costanza <··@p-cos.net> writes:
>>> This is still a fruitless aside. I have shown a code fragment in a
>>> dynamically typed language which I am convinced cannot be statically
>>> type checked. 
>>
>> You mean this?
>>
>>      (defun test ()
>>        (eval (read))
>>        (print (person-name *pascal*))
>>        (print (person-address *pascal*)))
> 
> Yes, the issue here is that a static type checker would have to reject 
> the call to person-address because it is not defined at compile time. In 
> a dynamically typed language, this program may still succeed.

No, you couldn't refer to not-yet-bound variables in the system I 
described elsewhere, but this is arguably a different issue.


> I will explain later how this works and why this is useful.

I understand this allows you to do some things that you couldn't do in 
e.g. SML, but the ability to use variables that have yet to be bound (or 
bound again later) can also be a very dangerous thing. We avoided it in 
our Scheme code.

[...]

-thant
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186472902.563988.93740@d55g2000hsg.googlegroups.com>
On 7 Aug., 08:10, Pascal Costanza <····@p-cos.net> wrote:

> This is still a fruitless aside. I have shown a code fragment in a
> dynamically typed language which I am convinced cannot be statically
> type checked.

You are right. No program with unrestricted EVAL in it can be type
checked in a meaningful way.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x643rz46e.fsf@hod.lan.m-e-leypold.de>
Pascal Costanza wrote:

> Thant Tessman wrote:
>> Pascal Costanza wrote:
>>> Markus E.L. 2 wrote:
>>>> Pascal Costanza wrote:
>>>>
>>>>> Markus E.L. 2 wrote:
>>>>>> I can help you to construct a case where a meaningful program
>>>>>> cannot be typed even in many of the more advanced static type systems:
>>>>>> The Y combinator. Indeed I'm wondering that nobody of the static
>>>>>> typing opponents hasn't pulled it out yet. Presumably because they
>>>>>> don't know enough about typing.
>>>>> Likewise, the dynamic typing opponents don't seem to know enough about
>>>>> the possibilities in dynamically typed languages.
>>>>
>>>> My good man, I hate to break it to you, but: A dynamically _typed_
>>>> language is actually a statically typed language with exactly one type
>>>> in it. Don't try to hide between som kind of mysticism as if the
>>>> situation where exactly symmetric.
>>>
>>> This is a fruitless aside.
>>>
>>> You still don't know what is possible in dynamically typed languages.
>> There are folks on both sides of this debate who aren't speaking
>> from a position of experience. Don't know about Markus in
>> particular, but I've programmed in Scheme more than I've programmed
>> in SML. I've even done a bit of macro programming:
>>    http://www.standarddeviance.com/p45-tessman.pdf
>> Anyway, in this particular case, Markus' point is a valid
>> one. Building and using such a universal type and its associated
>> infrastructure would  be awkward, and as a practical matter it would
>> not at all lend itself to macro programming. But static type systems
>> do not preclude programming in a "dynamically-typed" style. What it
>> would do is force the programmer to acknowledge in the code where
>> "dynamic type errors" are allowed to happen. If there is any Don
>> Geddis cost associated with static type systems as such, this is it.
>
> This is still a fruitless aside. I have shown a code fragment in a
> dynamically typed language which I am convinced cannot be statically
> type checked. And now you're dancing around this example arguing about
> terminology and the ability to simulate dynamic typing in a statically
> typed language without ever getting to the heart of the issue.
>

You said 

| Likewise, the dynamic typing opponents don't seem to know enough about
| the possibilities in dynamically typed languages.

I say: But we know enough, since dynamic typing is a special case of
static typing.

As far as eval goes: The code cannot be statically typed _in_ the
static language, but it can be rewritten for a dynamic
subsystem. Basically that amounts then to embedding an interpreter
which uses most of the infrastructure of the underlying static host
language. Yes, it's greenspunning of some kind. But it has applications
(e.g. dynamic plugins and config files). But that's beside the point:
The point is, that the fundamental difference and the symmetry "the
dynamic typing opponents don't seem to know enough about the
possibilities" doesn't exist. At least not if the opponent in
question knows how to build simple interpreters on top of his/her
languages.

Regards -- Markus
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5hrq3eF3lvv8uU1@mid.individual.net>
Markus E.L. 2 wrote:

> The point is, that the fundamental difference and the symmetry "the
> dynamic typing opponents don't seem to know enough about the
> possibilities" doesn't exist. At least not if the opponenent in
> question knows how to build simple interpreters on top of his/her
> languages.

That's a Turing-equivalence argument. Expressiveness is something else. 
See http://citeseer.ist.psu.edu/felleisen90expressive.html

In other words, you cannot embed something like (eval (read)) in your 
statically typed language. You can only implement an interpreter for (a 
subset of) Common Lisp (or a language with similar features) to get the 
same level of expressiveness. It will probably never be possible to 
statically type check such a language, or at least only to a certain 
extent (i.e., it can only emit warnings, but not errors).

For such a claim, it doesn't matter a single bit what the underlying 
language is like that implements the interpreter in question. It only needs 
to be Turing equivalent, that's all.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <mps1zfb72.fsf@hod.lan.m-e-leypold.de>
Pascal Costanza wrote:

> Markus E.L. 2 wrote:
>
>> The point is, that the fundamental difference and the symmetry "the
>> dynamic typing opponents don't seem to know enough about the
>> possibilities" doesn't exist. At least not if the opponenent in
>> question knows how to build simple interpreters on top of his/her
>> languages.
>
> That's a Turing-equivalence argument. Expressiveness is something
> else. See http://citeseer.ist.psu.edu/felleisen90expressive.html
>
> In other words, you cannot embed something like (eval (read)) in your
> statically typed language. You can only implement an interpreter for
> (a subset of) Common Lisp (or a language with similar features) to get
> the same level of expressiveness. It will probably never be possible
> to statically type check such a language, or at least only to a
> certain extent (i.e., it can only emit warnings, but not errors).
>
> For such a claim, it doesn't matter a single bit what the underlying
> language is like that implements interpreter in question. It only
> needs to be Turing equivalent, that's all.

Pascal, do you read c.l.f? You have been posting 4 replies to me so
far and if you read c.l.f I'd prefer to (a) continue the discussion on
this sub topic (b) tie together the answer on all 4 posts and (c) do
so on c.l.f. where the question "is a dynamically typed language a
special case of a statically typed one" is more on topic.

Regards -- Markus
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5hrt48F3lm45fU1@mid.individual.net>
Markus E.L. 2 wrote:
> 
> Pascal Costanza wrote:
> 
>> Markus E.L. 2 wrote:
>>
>>> The point is, that the fundamental difference and the symmetry "the
>>> dynamic typing opponents don't seem to know enough about the
>>> possibilities" doesn't exist. At least not if the opponenent in
>>> question knows how to build simple interpreters on top of his/her
>>> languages.
>> That's a Turing-equivalence argument. Expressiveness is something
>> else. See http://citeseer.ist.psu.edu/felleisen90expressive.html
>>
>> In other words, you cannot embed something like (eval (read)) in your
>> statically typed language. You can only implement an interpreter for
>> (a subset of) Common Lisp (or a language with similar features) to get
>> the same level of expressiveness. It will probably never be possible
>> to statically type check such a language, or at least only to a
>> certain extent (i.e., it can only emit warnings, but not errors).
>>
>> For such a claim, it doesn't matter a single bit what the underlying
>> language is like that implements interpreter in question. It only
>> needs to be Turing equivalent, that's all.
> 
> Pascal, do you read c.l.f? 

Not this thread.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <jnd4xzgnsc.fsf@hod.lan.m-e-leypold.de>
Pascal Costanza wrote:

> Markus E.L. 2 wrote:
>> Pascal Costanza wrote:
>>
>>> Markus E.L. 2 wrote:
>>>
>>>> The point is, that the fundamental difference and the symmetry "the
>>>> dynamic typing opponents don't seem to know enough about the
>>>> possibilities" doesn't exist. At least not if the opponenent in
>>>> question knows how to build simple interpreters on top of his/her
>>>> languages.
>>> That's a Turing-equivalence argument. Expressiveness is something
>>> else. See http://citeseer.ist.psu.edu/felleisen90expressive.html
>>>
>>> In other words, you cannot embed something like (eval (read)) in your
>>> statically typed language. You can only implement an interpreter for
>>> (a subset of) Common Lisp (or a language with similar features) to get
>>> the same level of expressiveness. It will probably never be possible
>>> to statically type check such a language, or at least only to a
>>> certain extent (i.e., it can only emit warnings, but not errors).
>>>
>>> For such a claim, it doesn't matter a single bit what the underlying
>>> language is like that implements interpreter in question. It only
>>> needs to be Turing equivalent, that's all.
>> Pascal, do you read c.l.f?
>
> Not this thread.

You don't subscribe to the group? And you can subscribe to threads
selectively? Or perhaps I misunderstood. Well, I think then we'll have
to leave the discussion since we cannot find common ground where it is
onT. I'll perhaps answer _some_ of your posts next weekend, but
perhaps also not, contingent of course on my not finding that
everything has been said already.

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bje1ict8m1cd5@corp.supernews.com>
Markus E.L. 2 wrote:
> As far as eval goes: The code cannot be statically typed _in_ the
> static language...

MetaOCaml does this.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <a0643uacs6.fsf@hod.lan.m-e-leypold.de>
Pascal Costanza wrote:

> Ingo Menger wrote:
>
>>> I just want to inform you about the fact that there are people who care
>>> about the programs that static type systems cannot check and who
>>> understand what the underlying issues are.
>> I respect that, because I can imagine that such programs really
>> exist.
>> But when I hear statements like "the type system disturbs me in my
>> usual code-some-idea-and-test-it cycle", or the like then I have the
>> impression that certain people can't see how development in a strongly
>> typed language is like.
>
> This cuts both ways.
>
>> It's like those FORTRAN IV programmers that
>> refuse(d*) to adapt to new languages because there was no FORMAT
>> statement, no computed GOTO and no COMMON block with EQUIVALENCE
>> clauses.
>
> These are straw men.
>
> If you can define a type system that lets the following program
> through, then I'm in. If you cannot make it acceptable, I am not
> interested. [1]
>
> (defclass person ()
>    ((name :initarg :name :reader person-name)))
>
> (defvar *pascal* (make-instance 'person :name "Pascal"))
>
> (defun test ()
>    (eval (read))
>    (print (person-name *pascal*))
>    (print (person-address *pascal*)))
>
>

Care to explain what the trick is? What the program is supposed to do?
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5hrqvjF3m04h0U1@mid.individual.net>
Markus E.L. 2 wrote:
> Pascal Costanza wrote:
> 
>> If you can define a type system that lets the following program
>> through, then I'm in. If you cannot make it acceptable, I am not
>> interested. [1]
> 
> Care to explain what the trick is? What the program is supposed to do?
> 

OK, here we go.

 >> (defclass person ()
 >>   ((name :initarg :name :reader person-name)))
 >>
 >> (defvar *pascal* (make-instance 'person :name "Pascal"))

At this stage, there is a class 'person (like in typical object-oriented 
languages) with one field 'name. It can be accessed with the reader 
(getter) 'person-name. *pascal* is a global variable that holds an 
instance of that class with the 'name "Pascal".

 >> (defun test ()
 >>   (eval (read))

(eval (read)) reads an expression from the user and evaluates it in the 
global environment.

For example, a user may enter the following form:

(defclass person ()
   ((name :initarg :name :reader person-name)
    (address :initarg :address :reader person-address)))

This will update the current definition of the class 'person and makes 
it have two fields now, the former 'name and the new 'address with the 
reader 'person-address.

 >>   (print (person-name *pascal*))

This prints the 'name of *pascal* without any problems.

 >>   (print (person-address *pascal*)))

This attempts to access the 'address of *pascal*. Since it hasn't been 
initialized yet, though, it will throw an error indicating that the 
field is undefined. The user is presented a dialog in which she can 
choose to initialize the field with a proper address (for example 
"Brussels"). After that hot fix, the program happily continues to print 
that address.

Ergo, the program has been successfully executed.

What I have described here is well-defined semantics of Common Lisp. See 
http://www.lispworks.com/documentation/HyperSpec/Body/04_cf.htm for details.

This is a technique that is actually used in practice. Of course, users 
are _not_ given the opportunity to mess with the system in such a direct 
way. However, maintainers can patch systems in this way at runtime. The 
error that is thrown on uninitialized fields can be programmatically 
caught, for example in order to automatically fill such fields with 
values from a database.

It may be possible to provide similar functionality while preserving 
static type checking. In fact, there are a number of approaches 
suggested for doing this. However, what I have seen so far never goes as 
far as Common Lisp goes. What Common Lisp offers is still not ideal, and 
the lack of a static type system is certainly not the most pressing issue.

If you can define a good static type system for programs that can be 
dynamically upgraded without unnecessary restrictions, go ahead. I would 
definitely be impressed. But I am not holding my breath...


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87k5sc6j03.fsf@geddis.org>
Ingo Menger <···········@consultant.com> wrote on Fri, 03 Aug 2007:
> My personal opinion here is that we will have untyped languages for toy
> scripts, as it used to be. And for the rest, where dollars or human lifes
> count, we'll have languages with even more advanced type systems.

Presumably, you mean to include dynamically typed languages like Common Lisp
under your "untyped" category, and compile-time statically typed languages
as the precursors to these "even more advanced type systems".

Surely you can see how insulting your comments are to the highly skilled
professional programmers who are currently choosing to implement their
algorithms in languages like Common Lisp; who are aware of and have used
languages like Haskell and OCaml and ML; and who nonetheless prefer
developing production-quality code in a dynamically typed language?

Perhaps you should start to ask what benefits these mature programmers
believe they are getting from languages like Common Lisp.  Perhaps you'll
begin to see that programming language design involves tradeoffs, and that
your cherished compile-time static typing is not necessarily a Universal
Good.

> The time is not so far away when we will regard an
> ArrayIndexOutOfBoundsException a typing error, just as we today may regard
> a NullPointerException a typing error

Really.

Consider this program (in pseudocode):

        define array A [1..78556];
        set i = compute_smallest_sierpinski_number();
        set A[i] = 10;

It may help you to reference
        http://en.wikipedia.org/wiki/Sierpinski_number
In short, the smallest Sierpinski number is probably 78557, but there
remains a chance that there is a smaller one.  The answer is deterministic,
computable, the algorithm is known ... but the answer is not yet known by
any human being.

Good luck inventing a compile-time static type system that will label the
final line of code as "ArrayIndexOutOfBounds", at compile time, prior to
actually running the code and possibly (or not!) generating an out-of-bounds
exception.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
A good way to threaten somebody is to light a stick of dynamite.  Then you call
the guy and hold the burning fuse up to the phone.  "Hear that?" you say.
"That's dynamite, baby."  -- Deep Thoughts by Jack Handey [SNL]
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xsl6z50g5.fsf@ruckus.brouhaha.com>
Don Geddis <···@geddis.org> writes:
> Surely you can see how insulting your comments are to the highly skilled
> professional programmers who are currently choosing to implement their
> algorithms in languages like Common Lisp; who are aware of and have used
> languages like Haskell and OCaml and ML; and who nonetheless prefer
> developing production-quality code in a dynamically typed language?

ARE there many programmers like that?  

> > [Ingo:] The time is not so far away when we will regard an
> > ArrayIndexOutOfBoundsException a typing error, just as we today may regard
> > a NullPointerException a typing error

> Consider this program (in pseudocode):
> 
>         define array A [1..78556];
>         set i = compute_smallest_sierpinski_number();
>         set A[i] = 10;
> The smallest Sierpinski number is probably 78557, but there
> remains a chance that there is a smaller one....
> Good luck inventing a compile-time static type system that will label the
> final line of code as "ArrayIndexOutOfBounds", at compile time, prior to
> actually running the code and possibly (or not!) generating an out-of-bounds
> exception.

I think you are misunderstanding what Ingo was getting at.  Such a
type checker is not required to prove that the index is actually out
of bounds, in order to reject the program.  If you want to guarantee
the absence of ArrayIndexOutOfBounds runtime exceptions, it's enough
for the type checker to reject any program that MIGHT generate them.
It does not have to guarantee that every rejected program WILL
generate them.  Typecheckers only make assertions about the programs
they accept, not the ones they reject.    

Obviously a type checker could still accept the above program if it
could somehow prove that i < 78557, which sounds currently unfeasible.
It sounds like there's a simpler proof that 78557 is a Sierpinski
number, which in turn can be encoded as a type and attached to "i", in
which case it's enough to just replace 78556 with 78557 in the
definition of A to get the program to typecheck.  If there was no such
proof (or if it was impractically difficult to encode), then you
simply couldn't write type-correct code in that style, so you'd
replace A with a different data structure than an array.
From: Kent M Pitman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <uzm17940c.fsf@nhplace.com>
[ comp.lang.lisp only; http://www.nhplace.com/kent/PFAQ/cross-posting.html ]

Paul Rubin <·············@NOSPAM.invalid> writes:

> Don Geddis <···@geddis.org> writes:
> > Surely you can see how insulting your comments are to the highly skilled
> > professional programmers who are currently choosing to implement their
> > algorithms in languages like Common Lisp; who are aware of and have used
> > languages like Haskell and OCaml and ML; and who nonetheless prefer
> > developing production-quality code in a dynamically typed language?
> 
> ARE there many programmers like that?  

Yes, I think there are such people.  Others will likely allege to you
that there are not (since they've as much as implied that belief in a
few other posts) and will perhaps call for such people to step forward.

But what on earth would even motivate someone to answer such a call?  

Would it be the opportunity to be grilled by this group of impartial
observers in the even-handed manner that this discussion has been
handled so far?

Perhaps it would be the joy of having to self-identify as "highly
skilled" at all, only to have their credentials, work history, skill,
and other factors publicly scrutinized as to whether their stepping
forward even counts?

Maybe it's the exciting prospect of having to "prove" that their
knowledge of what these communities has offered them is sufficient for
their choice to abandon it to be regarded as an informed one?  (If
they've opted for a life filled with "fewer useless proofs", surely
the opportunity to prove the justness of this choice will be high on
their list of ways to usefully spend their time...  Hmmm. Maybe not.)

Or maybe it would just be the thrill of breaking new conversational
ground by arguing over the meaning of the term "many"? (Once N
people do step forward, we can be pretty sure that someone in this
never-ending discussion will take issue with whether N constitutes
"many", or whether the brave souls willing to subject themselves to
this discussion will be classified as anomalous bad data).

What _does_ count as "many" in my book is the number of reasons why
coming forward to answer this question would be not worth anyone's
time or effort.  There's no apparent upside...
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87odhn5ehj.fsf@geddis.org>
Paul Rubin <·············@NOSPAM.invalid> wrote on 03 Aug 2007 22:3:
> Such a type checker is not required to prove that the index is actually out
> of bounds, in order to reject the program.  If you want to guarantee the
> absence of ArrayIndexOutOfBounds runtime exceptions, it's enough for the
> type checker to reject any program that MIGHT generate them.  It does not
> have to guarantee that every rejected program WILL generate them.
> Typecheckers only make assertions about the programs they accept, not the
> ones they reject.

Ah, that's a fine approach, of course.

But if you're going to commit to that approach, then let me switch to a
completely different criticism: there are plenty of programs that are not
provably type-safe.  A dynamically typed language will allow you to execute
these programs.  A statically typed language, of the kind you're suggesting,
will refuse to execute such programs (even if they don't wind up causing a
type exception at runtime).

So now the problem is Ingo's claim that dynamically typed languages are only
useful for "toy" problems, and that any real programmer working on a real
problem would of course use a static type checker.

Can you, at least, understand that (in addition to the benefits of a static
type checker) there is a downside to those kinds of languages as well?  That
there might be some benefit to a programmer using a dynamically typed
language?

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
Children are natural mimics who act like their parents despite every effort to
teach them good manners.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x1wejq8lk.fsf@ruckus.brouhaha.com>
Don Geddis <···@geddis.org> writes:
> But if you're going to commit to that approach, then let me switch
> to a completely different criticism: there are plenty of programs
> that are not provably type-safe.  A dynamically typed language will
> allow you to execute these programs.  A statically typed language,
> of the kind you're suggesting, will refuse to execute such programs
> (even if they don't wind up causing a type exception at runtime).

I think most languges would not consider an index-out-of-bounds
exception to be a type error.  A type error would be something like
taking the square root of a string.  

What Ingo is getting at about advanced type systems is that if we
consider index-out-of-bounds exceptions to be intolerable, we can
construct types that encode arbitrarily complex reasoning to reject
constructions that might cause that exception.  I.e. the constructor
for any subscript operation on such a type would embed a mathematical
proof that the subscript was in range.  If we choose to follow a
discipline that forbid index-out-of-bounds exceptions, then our
options would be 1) actually figure out such proofs for each
subscript; or 2) use something like a hash table instead of a linear
vector, so that every subscript would actually be in range.  We would
do #1 in the simple cases (and there are actually a lot of those) and
#2 in the complex cases like the example you gave.  Of course we don't
need an advanced type system if we just pick #2 all the time.  But
since using arrays instead of lookup tables has performance advantages
(etc.), we gain something from advanced type systems that let us keep
pushing the boundary between "simple" and "complex", so we can choose
#1 more of the time.

> So now the problem is Ingo's claim that dynamically typed languages
> are only useful for "toy" problems, and that any real programmer
> working on a real problem would of course use a static type checker.
> Can you, at least, understand that (in addition to the benefits of a
> static type checker) there is a downside to those kinds of languages
> as well?  That there might be some benefit to a programmer using a
> dynamically typed language?

Well, Ingo was talking about programs where lives and dollars were at
stake, which I interpret to mean programs for which benefits to end
users (e.g. an airline passenger is an end user of avionics software)
are more important than benefits to programmers.  

Anyway, sure, dynamic typing has benefits, I think we all agree on
that.  What's not so clear is whether the benefits outweigh the costs
on all that many problems.
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <873ayy6glc.fsf@geddis.org>
Paul Rubin <·············@NOSPAM.invalid> wrote on 04 Aug 2007 14:4:
> What Ingo is getting at about advanced type systems is that if we
> consider index-out-of-bounds exceptions to be intolerable, we can
> construct types that encode arbitrarily complex reasoning to reject
> constructions that might cause that exception.

Yes, I'm aware that you have a (potentially) Turing-complete hammer, and so
every problem you encounter looks like a nail to you.

The question is whether there is any value to a programmer discipline that
uses this approach.  If you require (via the language, say) every programmer
to only execute programs after the static checker can prove them type-safe,
and thus in my example you require the programmer to assist the checker in
eliminating all index-out-of-bounds errors at compile time ... in such a case,
do you wind up with: more code written? less errors in production code?
high quality of output?  do software companies adopting such an approach wind
up with higher market caps than those that don't?

The dynamic typing fans would say that you're forcing programmer effort into
non-productive avenues.  You're requiring a lot of effort, for very little
payoff.  You could redirect that same programmer effort to far more valuable
approaches, and wind up with better (more capable, less errors) code than the
approach you're suggesting.

But yes, it is "possible".

> I.e. the constructor for any subscript operation on such a type would embed
> a mathematical proof that the subscript was in range.  If we choose to
> follow a discipline that forbid index-out-of-bounds exceptions, then our
> options would be 1) actually figure out such proofs for each subscript; or
> 2) use something like a hash table instead of a linear vector, so that
> every subscript would actually be in range.  We would do #1 in the simple
> cases (and there are actually a lot of those) and #2 in the complex cases
> like the example you gave.  Of course we don't need an advanced type system
> if we just pick #2 all the time.  But since using arrays instead of lookup
> tables has performance advantages (etc.), we gain something from advanced
> type systems that let us keep pushing the boundary between "simple" and
> "complex", so we can choose #1 more of the time.

Yes, I agree that this is an approach that can be considered.

I disagree with Ingo (and your?) claim that this is necessarily a superior
approach as a programming methodology in all cases.  And, in particular, that
dynamic typing languages are necessarily only good for "toy" problems, and
not suited for production-quality code.

>> So now the problem is Ingo's claim that dynamically typed languages
>> are only useful for "toy" problems, and that any real programmer
>> working on a real problem would of course use a static type checker.
>> Can you, at least, understand that (in addition to the benefits of a
>> static type checker) there is a downside to those kinds of languages
>> as well?  That there might be some benefit to a programmer using a
>> dynamically typed language?
>
> Well, Ingo was talking about programs where lives and dollars were at
> stake, which I interpret to mean programs for which benefits to end
> users (e.g. an airline passenger is an end user of avionics software)
> are more important than benefits to programmers.

And you persist in the arrogant claim that, whenever the quality of the
result matters, static type checking results in superior code delivered.

That claim simply has not been demonstrated, and there are good reasons to
believe that it is false.

Moreover, there are plenty of examples of complex systems being delivered to
end users successfully, while based on dynamically typed languages.  Even in
your domain of airline passengers, we have the easy example that ITA
implemented orbitz.com in (a version of) Lisp.

"Lives and dollars at stake" does NOT mean that static type checking is a
clear win.  Only static type zealots, unfamiliar with the benefits of dynamic
type checking, believe such a claim in the absence of evidence.

> Anyway, sure, dynamic typing has benefits, I think we all agree on
> that.  What's not so clear is whether the benefits outweigh the costs
> on all that many problems.

At last you're starting to explore the space, that these kinds of language
decisions embody tradeoffs, with both pros and cons.  Perhaps we can start
listing some of the pros AND cons of language designs that choose static
type checking vs. dynamic type checking.  And that might start to get a
handle on what kinds of problems are more amenable to being solved with a
language that has one style vs. another.  And, finally, we might be able to
explore an imagined super-language, which provides programmer-specified type
checking, so that the programmer can divide and conquer his problem, and
perhaps subject a portion of his code to static checking, while reserving
other portions for dynamic checking.

Rather than: "programming languages with dynamic type checking are only
useful for toy scripts, while any serious programming in the future will be
done with advanced static type checking."

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
If a kid asks where rain comes from, I think a cute thing to tell him is "God
is crying."  And if he asks why God is crying, another cute thing to tell him
is "Probably because of something you did."  -- Deep Thoughts, by Jack Handey
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <swswa8xxh.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:

> Paul Rubin <·············@NOSPAM.invalid> wrote on 04 Aug 2007 14:4:
>> What Ingo is getting at about advanced type systems is that if we
>> consider index-out-of-bounds exceptions to be intolerable, we can
>> construct types that encode arbitrarily complex reasoning to reject
>> constructions that might cause that exception.
>
> Yes, I'm aware that you have a (potentially) Turing-complete hammer, and so
> every problem you encounter looks like a nail to you.

Fortunately most type systems are not Turing complete. They judge the
type soundness of a program in finite time.


-- Markus
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <qmsl6y8xj3.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:

> And you persist in the arrogant claim that, whenever the quality of the
> result matters, static type checking results in superior code delivered.
>
> That claim simply has not been demonstrated, and there are good reasons to
> believe that it is false.
>

Certainly, certainly. Somehow I've the impression you're trying to
ignore some decades of software engineering research.

> Moreover, there are plenty of examples of complex systems being delivered to
> end users successfully, while based on dynamically typed languages.  Even in

"Good enough" doesn't mean it couldn't have been better.

> your domain of airline passengers, we have the easy example that ITA
> implemented orbitz.com in (a version of) Lisp.

Your arrogant way of arguing your points has many logical errors. Sorry.

-- Markus



> "Lives and dollars at stake" does NOT mean that static type checking is a
> clear win.  Only static type zealots, unfamiliar with the benefits of dynamic
> type checking, believe such a claim in the absence of evidence.

Actually, the language still often used for high integrity systems --
Ada -- is statically typed. That alone doesn't cut it, but needs to be
augmented by verification. Still, a type system is a good basis to
begin a verification because it provides the basic scaffolding in which
the contracts can be expressed (the definition/input domain of the
operations). Everything else would be mathematically unsound and/or
cumbersome.

>
>> Anyway, sure, dynamic typing has benefits, I think we all agree on
>> that.  What's not so clear is whether the benefits outweigh the costs
>> on all that many problems.
>
> At last you're starting to explore the space, that these kinds of language
> decisions embody tradeoffs, with both pros and cons.  Perhaps we can start
> listing some of the pros AND cons of language designs that choose static
> type checking vs. dynamic type checking.  And that might start to get a
> handle on what kinds of problems are more amenable to being solved with a
> language that has one style vs. another.  And, finally, we might be able to
> explore an imagined super-language, which provides programmer-specified type
> checking, so that the programmer can divide and conquer his problem, and

AFAIK Qi (of Mark Tarver) does this: user-specified type
checking. Still: it's type checking. Why would you want to have user-
specified checking if you don't believe in static typing at all?

> perhaps subject a portion of his code to static checking, while reserving
> other portions for dynamic checking.

Makes for really efficient code and lean interfaces. I do believe you
don't have a clue about what you're talking about. Note that I don't
oppose languages with dynamic typing. They have interesting properties.
They are not a sell-out to the devil. But I'd prefer that the opponents
of static typing have at least elementary know-how on the usage and
application of static typing before they start riding their attacks.

Regards -- Markus
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87abt51epm.fsf@geddis.org>
·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) wrote on Sun, 05 Aug 2007:
> Why would you [w]ant to have user specified checking if you don't believe
> in static typing at all?

Ah.  You've missed the thrust of my criticisms.

I'm actually a huge fan of having automatic tools that provide additional
help to me as a programmer.  There are all sorts of compile-time checks that
might fall into this category.

What I'm against is having the tools impose additional requirements upon _me_,
as a programmer.  If I can clearly express an algorithm, and get it to execute
correctly in one programming language, I have very little tolerance for a new
tool (like a static type checker) to refuse to execute that code in the future,
unless I provide it additional required information.

The question is simply, which is to be master, and which is to be slave?
The programmer?  Or the tool?

Fans of static type checking always promote the benefits ("look at this error
which can be caught automatically!") without ever acknowledging the costs.
They either dismiss them, or just assume that the costs are zero.

But the truth is that, like most things in programming language design,
static type checking is a tradeoff.  It offers some (minor? major?) benefit,
and imposes some (major? minor?) costs to a programmer.  It's far from clear,
in general, whether this tradeoff is a net gain on average.

Now, this is not a necessary property of so-called "static type systems".
It's just the way all languages promoted by static typing fans work.  With
additional rules on the programmer, the static type checker tool can provide
additional benefit.

Most static typing fans compare a recent language like OCaml with something
that is type unsafe, like C.  And they wonder, how could anyone prefer such
a mess like C?

But that's not the only alternative.  As you've commented elsewhere,
safe/unsafe typing is a different axis from static/dynamic type checking.
Lisp is a language with safe typing, but dynamic type checks.  So we can
agree on the value of type safety, at least.

Then it comes to the type checks themselves.  Doing lots of work at compile
time is of course a great idea if possible.  But I'd be a much bigger fan if
this was an optional add-on tool, to be used if and only if the programmer
found it of value for a particular problem.  More in the style of lint
        http://en.wikipedia.org/wiki/Lint_programming_tool
for C programs.  Lint doesn't alter the compiler; it doesn't prevent code
from being executed.  It merely offers additional commentary on source code,
if that is found valuable for a particular development project.  Perhaps some
software house will make its own local requirement, say that a staff programmer
should never check in any code to the shared repository unless it also passes
lint with no errors/warnings.  But that's a local choice; not a draconian
requirement of the language.

Could this work with type checking?  Yes.  CMUCL is an implementation of
Common Lisp that does extensive type inference at compile time.  And offers
plenty of compiler commentary, including suspected (or known!) type errors,
optimizations taken (or failed, and for what reasons), etc.

But the key is that CMUCL _also_ happily compiles valid Common Lisp code,
regardless of whether its static type checker is able to prove the code
type safe.

So: it is not that I see no benefit to static type checking.  It is that I
see unacceptable drawbacks to changes in source code required by the kinds of
static type checkers implemented by extreme fans of the topic.  To me, the
benefits seem minor (but not zero), and the drawbacks seem major (definitely
not zero).

Hence the strong response when static typing fans talk about how dynamic
languages are only good for "toy problems" or programming that is not mission
critical or where lives are not at stake, as well as the claim that the
future of programming languages of course will involve some more advanced
static type checker, and if you don't agree then you're simply ignorant or
naive.

I'm not ignorant, I'm not naive, and I still don't think the benefits of
(typical) static type checkers are worth the cost (in freedom) to me as a
programmer.  Not today, and not in some future hypothetical system either.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
People just naturally assume that dogs would be incapable of working together
on some sort of construction project.  But what about just a big field full of
holes?  -- Deep Thoughts, by Jack Handey
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186272117.251109.59740@g12g2000prg.googlegroups.com>
> Anyway, sure, dynamic typing has benefits, I think we all agree on
> that.  What's not so clear is whether the benefits outweigh the costs
> on all that many problems.

I could just as easily say "anyway, sure, static typing has benefits,
I think we all agree on that. What's not so clear is whether the
benefits outweigh the costs on all that many problems". Who is right
depends on nasty empirical observations such as how often programmers
encounter domain invariants that can be easily expressed in existing
type systems, how often static type systems get in the way of
extensive refactoring, how often type errors make it past testsuites,
etc. You can evaluate these in terms of things like the relative value
(to the customer) of theoretical correctness versus features or
development time, etc.

Since you have admitted that theoretical arguments are futile here,
since you can't prove your point theoretically, where is your
experimental evidence that dynamic languages are only suitable for toy
problems?
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xr6mistft.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
> I could just as easily say "anyway, sure, static typing has benefits,
> I think we all agree on that. What's not so clear is whether the
> benefits outweigh the costs on all that many problems".

I would say that if developing and using a dynamic program costs X
then developing and using a corresponding static program might cost AX
for some A that varies from problem to problem.  There is a
so-far-unresolved question about whether A in general tends to be
greater than 1 or less than 1, and that's why these battles keep
raging.  If A > 1 then dynamic can be said in some sense to "win" and
vice versa.

What I would say is there is good evidence that A is less than (say)
100.  Any real-world application that you can do in Lisp, I can
probably do in ML if I spend 100x as much.  Would you dispute that?

What I don't think there's such good evidence for is that A is greater
than 0.01.  By that I mean, static languages are better at reducing
the likelihood of long-tail failures, to the point that if I write a
sufficiently paranoid static program with correctness proofs, you
might not be able to develop a dynamic counterpart even with 100x the
expenditure, if its reliability is based only on behavior tests
instead of proofs.  Of course the "expenditure" includes the cost of
cleaning up after a possible plane crash or nuclear meltdown caused by
a software defect.  So I think the high-assurance crowd will stay with
static languages, by which I would include things like Lisp code
certified with ACL2.

> Since you have admitted that theoretical arguments are futile here,
> since you can't prove your point theoretically, where is your
> experimental evidence that dynamic languages are only suitable for
> toy problems?

Well, it was Ingo that said that, and I think he was using a pretty
sweeping definition of toy problems.  I might have a problem in which
data corruption is intolerable but I can live with the program
crashing with exception traces as long as it doesn't corrupt data.  So
I'm fine writing stuff like that in Lisp or Python, with assertions
all over the place that can fail at runtime.  I'm in a different
situation if the runtime is not allowed to fail.

Anyway I used to be a C programmer and later switched to Lisp and
Python, which I liked a lot better.  I'm a newbie to these more
advanced static languages, so the interest that I've taken in them
relies mostly on the impressions I get from their users who are much
more experienced with them than I am.  But I do notice that:

  1) There are a lot of ML programmers who have considerable Lisp
     experience but are happier using ML so they stay with it.

  2) There don't seem to be nearly as many programmers with a lot
     of experience in both languages, who are happier with Lisp.
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87k5sa4oq6.fsf@geddis.org>
Paul Rubin <·············@NOSPAM.invalid> wrote on 04 Aug 2007 17:4:
> [S]tatic languages are better at reducing the likelihood of long-tail
> failures, to the point that if I write a sufficiently paranoid static
> program with correctness proofs, you might not be able to develop a dynamic
> counterpart even with 100x the expenditure, if its reliability is based
> only on behavior tests instead of proofs.  Of course the "expenditure"
> includes the cost of cleaning up after a possible plane crash or nuclear
> meltdown caused by a software defect.  So I think the high-assurance crowd
> will stay with static languages

Nice use of scare tactics, with your plane crash and nuclear horror.  Add in
the deaths of some children, and maybe a Hitler reference, and I'm sure you'll
have won the argument!

OK, let's get real here.  Are you seriously so naive as to believe that
compile-time type safety proofs are identical with proofs of program
correctness?  You do realize, I hope, that there's a whole subfield of
computer science involved with proving programs correct.  And that this is
a _different_ field than the one interested in static type checking in
programming languages.

Having source code pass a static type checker indeed eliminates a particular
class of possible bugs.  We can argue about how big a class those bugs are,
how common, whether they're also eliminated by other parts of a good
programmer's methodology, etc.  But the one thing we shouldn't need to argue
about is whether a program that passes a static type checker is correct or
not.

None of this discussion is about correctness proofs.

And in any case, neither static/dynamic type checking, nor even real proofs
of program correctness (which have their own set of issues, and generally
only apply to very simple programs) really have much of anything to do with
methodologies for writing reliable software to pilot airplanes or control
nuclear reactors.

That too is an interesting topic, and static type checking might be one small
tool in the large set of requirements and responsibilities for a reliable
software methodology.

But seriously.  Stop pretending that compile-time static type checking is
somehow equivalent to proving that a program exhibits correct behavior.

> I might have a problem in which data corruption is intolerable but I can
> live with the program crashing with exception traces as long as it doesn't
> corrupt data.  So I'm fine writing stuff like that in Lisp or Python, with
> assertions all over the place that can fail at runtime.  I'm in a different
> situation if the runtime is not allowed to fail.

Again, you're confusing a type system with some kind of verification of
100% uptime.  And you're a fool if you think that a static type checker
somehow provides such a guarantee.

A static type checker imposes a few more requirements on a programmer and a
programming language, and in exchange it uses limited inference to catch a
small class of bugs at compile time.  That's all it is.  It is not the
solution to all programming.  It provides no guarantees of how the behavior
of the resulting program works in the real world.  Nothing about correctness,
nothing about percent uptime.

The ONLY focused discussion that makes sense is: If a programmer has 10 (or
whatever) hours to write some code, and they have an option of using a
language with strong compile-time type checks, and also an option to use a
language with dynamic types, on average what kind of code gets produced
(assuming expert-level programming skills with either language)?  Which
methodology results in code with fewer bugs?  Which results in code with
more features?  Which code is easier to maintain as requirements change?
Etc.

Static typing fans always assert that their methodology will necessarily
result in code with fewer bugs, but this assertion has precious little
evidence to back it up.  And there are some reasons to believe it may be
false.  (For example, it may require more time to write code using that
methodology, while the bugs caught that way may be rare or also caught using
other parts of the alternate programming methodology.  Meanwhile, if the
dynamic typing programmer IS able to program more quickly -- still to be
proven too, of course -- there is a thought that the additional time he saves
could be used for additional testing.  Say, to be sure that the implemented
algorithm is actually the correct one for the real problem.  It is far, far
from clear that a static typing methodology results in superior code on
average.)

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
When you die, if you go somewhere where they ask you a bunch of questions about
your life and what you learned and all, I think a good way to get out of it is
just to say, "No speaka English."  -- Deep Thoughts, by Jack Handey
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x4pjeef2m.fsf@ruckus.brouhaha.com>
Don Geddis <···@geddis.org> writes:
> OK, let's get real here.  Are you seriously so naive as to believe that
> compile-time type safety proofs are identical with proofs of program
> correctness?  You do realize, I hope, that there's a whole subfield of
> computer science involved with proving programs correct.  

Remember that we're on a subthread where you brought up using advanced
type systems to represent whether numbers could be Sierpinski numbers.
That requires all the machinery that's used in correctness proofs, not
general purpose typeful language stuff.  Hey, it's a tangent, but it's
your tangent too.

> And that this is a _different_ field than the one interested in
> static type checking in programming languages.

No.  Same field, or anyway closely overlapping.  Correctness proofs
are why the PL theory community is so interested in elaborate type
systems.  See for example Concoqtion, or the Wikipedia article about
constructive type theory.

> Having source code pass a static type checker indeed eliminates a
> particular class of possible bugs.  We can argue about how big a
> class those bugs are, how common, whether they're also eliminated by
> other parts of a good programmer's methodology, etc.  But the one
> thing we shouldn't need to argue about is whether a program that
> passes a static type checker is correct or not.
> None of this discussion is about correctness proofs.

Passing a static type checker means the program is free of certain
classes of defects.  Fancier type systems and checkers do the same
thing, at finer granularity.  The really fancy ones are integrated
with theorem provers and proof assistants so they can check assertions
of arbitrary complexity, like whether something is a Sierpinski
number.  That's what is meant by correctness proof--a proof that the
program satisfies certain assertions.  They used to be done with tools
like Floyd-Hoare logic and they still are, but (at least here on clf)
the interesting stuff is being done using type systems.

> But seriously.  Stop pretending that compile-time static type checking is
> somehow equivalent to proving that a program exhibits correct behavior.

Type checking doesn't prove correct program behavior--it proves the
absence of certain (presumably incorrect) behaviors (Pierce 2002).
That is often a higher level of assurance than simply testing the code
and not observing the behavior from the test data.

> > So I'm fine writing stuff like that in Lisp or Python, with
> > assertions all over the place that can fail at runtime.  I'm in a
> > different situation if the runtime is not allowed to fail.
> 
> Again, you're confusing a type system with some kind of verification of
> 100% uptime.  And you're a fool if you think that a static type checker
> somehow provides such a guarantee.

It can in many cases guarantee the same things that the assertions
check for, which means the assertions can't fail.  So there's that
many fewer places where the program can crash.  Really, you're doing
pretty well in the strawman department.  I haven't made any claims
like what you're saying.

> A static type checker imposes a few more requirements on a
> programmer and a programming language, and in exchange it uses
> limited inference to catch a small class of bugs at compile time.
> That's all it is.  It is not the solution to all programming.

You're acting like that "small class of bugs" was something trivial
when in fact it's enormous.

> The ONLY focused discussion that makes sense is: If a programmer has 10 (or
> whatever) hours to write some code, and they have an option of using a
> language with strong compile-time type checks, and also an option to use a
> language with dynamic types, on average what kind of code gets produced
> (assuming expert-level programming skills with either language)?  Which
> methodology results in code with fewer bugs?  Which results in code with
> more features?  Which code is easier to maintain as requirements change?
> Etc.

Heh, sounds like a programming contest.  OK then.  From
http://en.wikipedia.org/wiki/ICFP_Programming_Contest :

  1998, winner used Cilk (a parallel C dialect, i.e. static weak types)
  1999, winner used OCaml
  2000, winner used OCaml
  2001, winner used Haskell
  2002, winner used OCaml
  2003, winner used C++
  2004, winner used Haskell
  2005, winner used Haskell
  2006, winner used "C++, Haskell, Python, Bash, 2D and a private
                    Google language."

Looks pretty bleak for dynamic languages in those results.
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87wswa2fzs.fsf@geddis.org>
Paul Rubin <·············@NOSPAM.invalid> wrote on 04 Aug 2007 22:1:
> Don Geddis <···@geddis.org> writes:
>> OK, let's get real here.  Are you seriously so naive as to believe that
>> compile-time type safety proofs are identical with proofs of program
>> correctness?  You do realize, I hope, that there's a whole subfield of
>> computer science involved with proving programs correct.  
>
> Remember that we're on a subthread where you brought up using advanced
> type systems to represent whether numbers could be Sierpinski numbers.
> That requires all the machinery that's used in correctness proofs, not
> general purpose typeful language stuff.  Hey, it's a tangent, but it's
> your tangent too.

Both fields use theorem provers, it is true.  So does AI.  That doesn't make
all these fields identical.

At the very least, a proof of program correctness needs some formal
description of the intent of the algorithm.  And then the proof matches the
declarative specification of what should be the result of the computation,
with an analysis of the necessary behavior of the source code presented to
it.

Static type checkers are never given a specification of program behavior.
They can't possibly be doing any kind of correctness proof of this nature.

> Correctness proofs are why the PL theory community is so interested in
> elaborate type systems.

I agree that, if your interest is in proofs of correctness, then static type
systems are surely a tool to assist in that field.

But that's not what we've been talking about, is it?  We can have a
discussion about the utility of proofs of program correctness if you want.
That topic is also controversial.  But it's a different topic.

Up to now, the assertion has been that languages with static typing result in
better programs, EVEN WITHOUT any proof of program correctness.  So you can't
use correctness as a justification for why a programmer should submit to a
static typing methodology.

> Passing a static type checker means the program is free of certain
> classes of defects.  Fancier type systems and checkers do the same
> thing, at finer granularity.  The really fancy ones are integrated
> with theorem provers and proof assistants so they can check assertions
> of arbitrary complexity, like whether something is a Sierpinski
> number.

All possible.  (At least, after taking into account Pascal's observation that
what static type checkers detect overlaps, but does not replace, runtime
type errors.)

> That's what is meant by correctness proof--a proof that the program satisfies
> certain assertions.

No, that's not what is meant by a correctness proof of a program.  Not that
it satisfies "certain" assertions (like the types of arguments and
functions).

A program correctness proof is something that shows the behavior of the
program matches the abstract specification.  For example, that a sort
algorithm actually returns items in sorted order.  Not merely the presence or
absence of runtime type errors.  Type checking is a far, far simpler problem
than correctness.

> Type checking doesn't prove correct program behavior--it proves the
> absence of certain (presumably incorrect) behaviors (Pierce 2002).
> That is often a higher level of assurance than simply testing the code
> and not observing the behavior from the test data.

Ah, but testing the code gives you far more information than just whether
type errors appear.  For example, it gives you information about whether the
program is actually correct (like whether the numbers wind up sorted or not).

You're vastly overselling type checking, to equate it in any way with the
results of testing the code.  Verifying type safety -- which compile-time
type checking may, or may not, do better -- is but one tiny fraction of the
purpose of actually testing the code.

>> > So I'm fine writing stuff like that in Lisp or Python, with
>> > assertions all over the place that can fail at runtime.  I'm in a
>> > different situation if the runtime is not allowed to fail.
>> 
>> Again, you're confusing a type system with some kind of verification of
>> 100% uptime.  And you're a fool if you think that a static type checker
>> somehow provides such a guarantee.
>
> It can in many cases guarantee the same things that the assertions
> check for, which means the assertions can't fail.  So there's that
> many fewer places where the program can crash.

A type error doesn't require a program to crash, or server uptime to be
violated.  Languages with dynamic typing can recover from type errors, if
the programmer has that goal in mind.

> Really, you're doing pretty well in the strawman department.  I haven't
> made any claims like what you're saying.

Really?  Was it not you who wrote
        Of course the "expenditure" includes the cost of cleaning up after a
        possible plane crash or nuclear meltdown caused by a software defect.
and
        I'm in a different situation if the runtime is not allowed to fail.

Did you not mean to imply that, in cases where the quality of the final code
matters for some critical function, static type checking NECESSARILY results
in superior code?  That anyone who needed to deliver such systems, and choose
a dynamic language to implement the solution, must be an uneducated fool?

I directly contradict your claim that static typing is necessarily useful
for piloting planes, running nuclear powerplants, or implementing servers
with 100% uptime.

You're the one who brought the examples up.  Defend them.  You're the one
who keeps making snide asides that programmers who prefer languages with
dynamic typing don't care about the quality of their delivered code.  That's
insulting and wrong.

>> A static type checker imposes a few more requirements on a programmer and
>> a programming language, and in exchange it uses limited inference to catch
>> a small class of bugs at compile time.
>
> You're acting like that "small class of bugs" was something trivial when in
> fact it's enormous.

There is great debate about how large this class of bugs is, and in
particular whether other aspects of good programming methodology can detect
the vast majority of them already.

There are costs, and there are benefits.  It's far from clear that the
benefits outweigh the costs.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
Communist China is technologically underdeveloped because they have no alphabet
and therefore cannot use acronyms to communicate ideas at a faster rate.
	-- Omni
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <09ps20lp9f.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:


> You're vastly overselling type checking, to equate it in any way with the
> results of testing the code.  Verifying type safety -- which compile-time

No, we don't. It was on your side that people started to imply that
the proponents of static type checking pretend that static type
checking substitutes testing and program verification. It doesn't, but
it excludes a certain class of errors that are annoying enough that
static typing is worth the effort. Also static typing prepares the
arena for unit tests and verification: You already have verified
_part_ of the specification.


> type checking may, or may not, do better -- is but one tiny fraction of the
> purpose of actually testing the code.

As it turns out: Not quite so tiny: It vastly simplifies the task of
testing and verification (not to forget maintenance).

-- M
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <i3ir7sk8be.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:

> Up to now, the assertion has been that languages with static typing result in
> better programs, EVEN WITHOUT any proof of program correctness.  So you can't
> use correctness as a justification for why a programmer should submit to a
> static typing methodology.

Let's define "partial correctness" as the absence of a certain class of
deviations from the specification. Static typing certainly furthers
partial correctness in this sense and makes it much easier to
root out the rest of the errors (largely regardless of the method
chosen).

Regards -- Markus
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f94279$5ol$1@online.de>
Paul Rubin schrieb:
> Don Geddis <···@geddis.org> writes:
>> The ONLY focused discussion that makes sense is: If a programmer has 10 (or
>> whatever) hours to write some code, and they have an option of using a
>> language with strong compile-time type checks, and also an option to use a
>> language with dynamic types, on average what kind of code gets produced
>> (assuming expert-level programming skills with either language)?  Which
>> methodology results in code with fewer bugs?  Which results in code with
>> more features?  Which code is easier to maintain as requirements change?
>> Etc.
> 
> Heh, sounds like a programming contest.  OK then.  From
> http://en.wikipedia.org/wiki/ICFP_Programming_Contest :
> 
>   1998, winner used Cilk (a parallel C dialect, i.e. static weak types)
>   1999, winner used OCaml
>   2000, winner used OCaml
>   2001, winner used Haskell
>   2002, winner used OCaml
>   2003, winner used C++
>   2004, winner used Haskell
>   2005, winner used Haskell
>   2006, winner used "C++, Haskell, Python, Bash, 2D and a private
>                     Google language."
> 
> Looks pretty bleak for dynamic languages in those results.

When I took a closer look at the ICFP results, I found that Lisp would 
usually win zero to three of the first ten places.
I haven't been able to correlate this with the number of contestants in 
each language. It would be interesting to do that.

Regards,
Jo
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xejigymrv.fsf@ruckus.brouhaha.com>
Joachim Durchholz <··@durchholz.org> writes:
> When I took a closer look at the ICFP results, I found that Lisp would
> usually win zero to three of the first ten places.
> I haven't been able to correlate this with the number of contestants
> in each language. It would be interesting to do that.

The web sites for the contests are each done by that year's contest
organizers, so they're all over the place, have different formats from
one another, and it's not so easy to locate this data.  But in at
least some years they do show how many contestants used each language,
and Lisp is pretty well represented, especially if you count Dylan as
Lisp.

It also seems to me that anyone who wins a contest like that has to be
an extremely smart and knowledgeable programmer, so we should consider
it likely that some of the winners in fact knew all about Lisp and
decided not to use it.  
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m8d4y2611z.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:

> Paul Rubin <·············@NOSPAM.invalid> wrote on 04 Aug 2007 17:4:
>> [S]tatic languages are better at reducing the likelihood of long-tail
>> failures, to the point that if I write a sufficiently paranoid static
>> program with correctness proofs, you might not be able to develop a dynamic
>> counterpart even with 100x the expenditure, if its reliability is based
>> only on behavior tests instead of proofs.  Of course the "expenditure"
>> includes the cost of cleaning up after a possible plane crash or nuclear
>> meltdown caused by a software defect.  So I think the high-assurance crowd
>> will stay with static languages
>
> Nice use of scare tactics, with your plane crash and nuclear horror.  Add in
> the deaths of some children, and maybe a Hitler reference, and I'm sure you'll
> have won the argument!

No -- a really good example about risk and liability: Improbable but
really expensive failures might still ruin your expectation value.

Regards -- Markus
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <uj8x8q60z0.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:

> Having source code pass a static type checker indeed eliminates a particular
> class of possible bugs.  We can argue about how big a class those bugs are,

It also prepares the field for verification. Verification is easier
and shorter on top of a static type system (and with statically
typable programs).

Regards -- Markus
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87ir7t1hdo.fsf@geddis.org>
·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) wrote on Sun, 05 Aug 2007:
> Don Geddis wrote:
>> Having source code pass a static type checker indeed eliminates a particular
>> class of possible bugs.  We can argue about how big a class those bugs are
>
> It also prepares the field for verification. Verification is easier and
> shorter on top of a static type system (and with statically typable
> programs).

Yes, that I agree with.  If you're interested in automatic program
verification, you almost certainly want to build on top of a language with
a static type system.

Unfortunately for this argument, automatic program verification also has
played an almost non-existent role in producing any valuable large software
system.  We can argue about why, but the fact is that it's simply not a tool
in use by the vast majority of software producers.

Hence, this is not a good argument for adopting a static type system in a
programming language.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
If you're in a boxing match, try not to let the other guy's glove touch your
lips, because you don't know where that glove has been.
	-- Deep Thoughts, by Jack Handey
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <svk5s92fgc.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:

> ·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) wrote on Sun, 05 Aug 2007:
>> Don Geddis wrote:
>>> Having source code pass a static type checker indeed eliminates a particular
>>> class of possible bugs.  We can argue about how big a class those bugs are
>>
>> It also prepares the field for verification. Verification is easier and
>> shorter on top of a static type system (and with statically typable
>> programs).
>
> Yes, that I agree with.  If you're interested in automatic program
> verification, you almost certainly want to build on top of a language with
> a static type system.

The same applies to manual verification, especially, actually. If you
observe people manually "verifying" a program in a dynamically typed
language you find them making arguments like "this procedure handles
strings and nil, so I have to show here that the expression in the
invocation of the procedure is either nil or string". I call this
do-it-yourself type inference.

As far as automatic verification goes, either the annotation language
has to have a type system of some kind or the verifier lumps data flow
analysis together with something that might as well be termed type
inference.

Anyway -- verification involves typing, implicitly or explicitly.


> Unfortunately for this argument, automatic program verification also has
> played an almost non-existent role in producing any valuable large software
> system.  

I think you're mistaken here. (1) That Windows and MS Office (two very
common "large software systems") are not verified doesn't mean that
there are no verified systems out there and (2) most verification is
taking place using a mixture of assisted proof, manual verification
and automatic verification guided by proof annotation.

> We can argue about why, but the fact is that it's simply not a tool
> in use by the vast majority of software producers.

Again the wrong argument: Since we don't use verification, we don't
use the benefits of a type system (which I'd term the little brother
of verification).

> Hence, this is not a good argument for adopting a static type system in a
> programming language.

Hence your arguments are no good arguments against doing so.

Regards -- Markus
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87y7goy29i.fsf@geddis.org>
·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) wrote on Mon, 06 Aug 2007:
> Don Geddis wrote:
>> If you're interested in automatic program verification, you almost
>> certainly want to build on top of a language with a static type system.
>
> The same applies to manual verification, especially, actually. If you
> observe people manually "verifying" a program in a dynamically typed
> language you find them making arguments like "this procedure handles
> strings and nil, so I have to show here that the expression in the
> invocation of the procedure is either nil or string". I call this
> do-it-yourself type inference.

Verifying real-world code is not done by manual type inference.  That's not
what program verification is about.

> As far as automatic verification goes, either the annotation language has
> to have a type system of some kind or the verifier lumps data flow analysis
> together with something that might as well be termed type inference.

Yes, I agree that type inference is basically a small subset of automatic
program verification.

The point is that automatic program verification is not a significant tool
in building real-world valuable software systems.

So it doesn't matter whether static typing helps with verification or not.
That's not a good reason for a programmer to choose a language that offers
static typing.

> I think you're mistaken here. (1) That Windows and MS Office (two very
> common "large software systems") are not verified doesn't mean that
> there are no verified systems out there

Sure, but I didn't mention Windows and MS Office.

So let's start listing the world's most valuable or popular software systems,
and see if you can find any that used automatic program verification in any
significant way:
. MS Windows, Excel, Word, Powerpoint, IE
. Oracle database
. SAP
. Netscape/Firefox
. Linux
. Mac OSX, iTunes, iPhoto, iDVD
. FAA airport air traffic control
. IRS tax systems
. Hedge fund automatic program equity trading

Gosh, I'm coming up blank here.  None of them were created with such a tool.

Perhaps you, instead, can list the most valuable software systems that WERE
verified.

> and (2) most verification is taking place using a mixture of assisted
> proof, manual verification and automatic verification guided by proof
> annotation.

Sure, that's how software verification works.

The question is the real-world utility of such tools.  Looking at what
actually works in the world suggests: (1) very valuable software is being
created without such tools; and (2) no software that was created using such
tools is all that valuable.

This seems to imply that automatic program verification is not an important
tool for a professional software programmer.

> Again the wrong argument: Since we don't use verification, we don't
> use the benefits of a type system (which I'd term the little brother
> of verification).

At least a portion of YOUR argument was: a static type system is valuable,
because it is a precondition for automatic program verification.

My response is: yes, a static type system is a precursor to verification.
However, verification is not valuable (in general), so the fact that static
typing is useful for verification sheds no light on whether static typing is
useful in general.

Static typing may be useful in general, or it may not be.  But its use for
automatic program verification is not a reason to believe that static typing
is useful in general.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
But this long run is a misleading guide to current affairs.  In the long run we
are all dead.  Economists set themselves too easy, too useless a task if in
tempestuous seasons they can only tell us that when the storm is long past the
ocean is flat again.
        -- John Maynard Keynes, "A Tract on Monetary Reform", 1923
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xabt3ak3g.fsf@ruckus.brouhaha.com>
Don Geddis <···@geddis.org> writes:
> So let's start listing the world's most valuable or popular software systems,
> and see if you can find any that used automatic program verification in any
> significant way:
> . MS Windows, Excel, Word, Powerpoint, IE

Are you really saying your dynamic languages are going to lead to code
as unreliable as this crap?  That is a pretty poor advertisement.

> . Linux

That is an interesting one, it's a very widely used program with
published source, carefully studied by lots of smart people, running
on 1000's of important production systems over the world for years and
years and years, with fairly high apparent reliability (generally
beating Windows even though it's harder to search for errors in a
closed-source program).  Then Dawson Engler ran a bunch of static
analysis tools and found HUNDREDS of bugs, some very serious,
including in some of the most intensively tested and critical parts
of the code, such as the packet filter
(http://hci.stanford.edu/cstr/reports/2006-01.pdf p. 16).  What that
shows is that static analysis finds bugs that testing utterly
fails to find.

> . Mac OSX, iTunes, iPhoto, iDVD

Nobody's butt is in a sling if this stuff fails, so they don't bother.

> . FAA airport air traffic control

How about the UK:   http://www.mil-embedded.com/news/db/?6886

> . Hedge fund automatic program equity trading

Well, we at least see that some of those folks are using ML, which is
a start.

> Perhaps you, instead, can list the most valuable software systems that WERE
> verified.

How about the VHDL in Intel and AMD microprocessors.  One little bug
in the Pentium FDIV instruction and Intel took a $475 million charge.
They are doing what they can to not let THAT happen again, which
includes formal verification.

Also the Javacard bytecode interpreter, running on 10's of millions
(maybe 100's of millions) of computers (the microprocessor chips
inside smart credit and debit cards).  An exploit against that code
could lead to malicious card readers being able to suck the crypto
credentials out of those cards on a massive scale, letting the
perpetrators drain the corresponding bank accounts by the millions.
So they went the verification route.

> The question is the real-world utility of such tools.  Looking at what
> actually works in the world suggests: (1) very valuable software is being
> created without such tools; 

Yes, and it's awful bug-ridden crap!!!  Do you think that the number
of dollars that corporate hucksters are able to squeeze out of it
(while manipulating the laws to avoid liability for its failures) is
some indication of how great its programming methodology is?  Is that
the kind of code you write in Lisp all day because you think it's good
enough?  Now I'm convinced, I better give up Lisp once and for all!

> and (2) no software that was created using such tools is all that
> valuable.

No.  See above, look at the Spark-Ada stuff in military systems, the
Mondex certification authority, yada yada.  This is still a new field
(someone once said that "software development is in the
hunter-gatherer stage") so you're looking in the wrong direction by
asking about past systems anyway.

> This seems to imply that automatic program verification is not an important
> tool for a professional software programmer.

Well ok, I'll agree that there's still plenty of buggy legacy code for
obsolescing Lisp programmers to keep maintaining as they creak towards
retirement.  However, some of us want to write stuff that works, which
means looking for better methods.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-18A2B1.11282007082007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> > Perhaps you, instead, can list the most valuable software systems that WERE
> > verified.
> 
> How about the VHDL in Intel and AMD microprocessors.  One little bug
> in the Pentium FDIV instruction and Intel took a $475 million charge.
> They are doing what they can to not let THAT happen again, which
> includes formal verification.

Right and ACL2 (Applicative Common Lisp) seems to be a somewhat popular tool.
http://www.cs.utexas.edu/users/moore/acl2/

If you look at the program committee for their 2007
workshop you see people from Intel, Rockwell Collins, AMD, and others.
http://www.cs.uwyo.edu/~ruben/acl2-07/Main/ProgramCommittee

> 
> Also the Javacard bytecode interpreter, running on 10's of millions
> (maybe 100's of millions) of computers (the microprocessor chips
> inside smart credit and debit cards).  An exploit against that code
> could lead to malicious card readers being able to suck the crypto
> credentials out of those cards on a massive scale, letting the
> perpetrators drain the corresponding bank accounts by the millions.
> So they went the verification route.

http://www.cs.utexas.edu/users/moore/publications/acl2-papers.html
 
> > The question is the real-world utility of such tools.  Looking at what
> > actually works in the world suggests: (1) very valuable software is being
> > created without such tools; 
> 
> Yes, and it's awful bug-ridden crap!!!  Do you think that the number
> of dollars that corporate hucksters are able to squeeze out of it
> (while manipulating the laws to avoid liability for its failures) is
> some indication of how great its programming methodology is?  Is that
> the kind of code you write in Lisp all day because you think it's good
> enough?  Now I'm convinced, I better give up Lisp once and for all!

Calm down. ;-)
 
> > and (2) no software that was created using such tools is all that
> > valuable.
> 
> No.  See above, look at the Spark-Ada stuff in military systems, the
> Mondex certification authority, yada yada.  This is still a new field
> (someone once said that "software development is in the
> hunter-gatherer stage") so you're looking in the wrong direction by
> asking about past systems anyway.
> 
> > This seems to imply that automatic program verification is not an important
> > tool for a professional software programmer.
> 
> Well ok, I'll agree that there's still plenty of buggy legacy code for
> obsolescing Lisp programmers to keep maintaining as they creak towards
> retirement.  However, some of us want to write stuff that works, which
> means looking for better methods.

Above paragraph is, sorry, mostly bullshit. Do you think I don't
want to write software that works? Bullshit.

-- 
http://lispm.dyndns.org
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x7io7znt9.fsf@ruckus.brouhaha.com>
Rainer Joswig <······@lisp.de> writes:
> Right and ACL2 (Applicative Common Lisp) seems to be a somewhat popular tool.
> http://www.cs.utexas.edu/users/moore/acl2/

Yes, ACL2 is very cool, it was used to prove the AMD K5 floating point
division microcode after the famous Pentium FDIV bug.

> If you look at the program committee for their 2007
> workshop you see people from Intel, Rockwell Collins, AMD, and others.
> http://www.cs.uwyo.edu/~ruben/acl2-07/Main/ProgramCommittee

Also cool.  Interesting that John Matthews (a Haskell wizard, unless
I'm confused) is on that committee.  I recognize a couple of other
names too (besides Moore, of course).  John Harrison wrote HOL Light
(in ML, I guess), which is being used for the Flyspeck project (formal
proof of Kepler's conjecture), which will be the biggest formal
mathematical proof ever done.

> Above paragraph is, sorry, mostly bullshit. Do you think I don't
> want to write software that works? Bullshit.

Heh, I'm mostly hassling Don for his ridiculous crap about Windows and
MS Office being such shining examples of valuable and successful
software.  It's a pretty sad day when someone aspires to programming
standards like that.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-350A6C.11562007082007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Rainer Joswig <······@lisp.de> writes:
> > Right and ACL2 (Applicative Common Lisp) seems to be a somewhat popular tool.
> > http://www.cs.utexas.edu/users/moore/acl2/
> 
> Yes, ACL2 is very cool, it was used to prove the AMD K5 floating point
> division microcode after the famous Pentium FDIV bug.
> 
> > If you look at the program committee for their 2007
> > workshop you see people from Intel, Rockwell Collins, AMD, and others.
> > http://www.cs.uwyo.edu/~ruben/acl2-07/Main/ProgramCommittee
> 
> Also cool.  Interesting that John Matthews (a Haskell wizard, unless
> I'm confused) is on that committee.  I recognize a couple of other
> names too (besides Moore, of course).  John Harrison wrote HOL Light
> (in ML, I guess), which is being used for the Flyspeck project (formal
> proof Kepler's conjecture), which will be the biggest formal
> mathematical proof ever done.
> 
> > Above paragraph is, sorry, mostly bullshit. Do you think I don't
> > want to write software that works? Bullshit.
> 
> Heh, I'm mostly hassling Don for his ridiculous crap about Windows and
> MS Office being such shining examples of valuable and successful
> software.  It's a pretty sad day when someone aspires to programming
> standards like that.

As a user and owner of MS Office I think there is some
diversity in quality. On my Mac, Word is buggy. Powerpoint
is a lot better. Excel seems to work fine. The Mail client
is buggy. I'm not a big fan of their products, but Excel
in general is quite good. Given that the software has a huge
range of functionality and the development of such a product
has lots of interesting problems beyond typing. I would
give them some bonus, since few office products of this
scale have been written.

When we see a 'better' programming language and a better
programming methodology applied in a similar sized
software with better success - then we should judge.

-- 
http://lispm.dyndns.org
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <78abt32vw1.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <··············@ruckus.brouhaha.com>,
>  Paul Rubin <·············@NOSPAM.invalid> wrote:
>
>> Rainer Joswig <······@lisp.de> writes:
>> > Right and ACL2 (Applicative Common Lisp) seems to be a somewhat popular tool.
>> > http://www.cs.utexas.edu/users/moore/acl2/
>> 
>> Yes, ACL2 is very cool, it was used to prove the AMD K5 floating point
>> division microcode after the famous Pentium FDIV bug.
>> 
>> > If you look at the program committee for their 2007
>> > workshop you see people from Intel, Rockwell Collins, AMD, and others.
>> > http://www.cs.uwyo.edu/~ruben/acl2-07/Main/ProgramCommittee
>> 
>> Also cool.  Interesting that John Matthews (a Haskell wizard, unless
>> I'm confused) is on that committee.  I recognize a couple of other
>> names too (besides Moore, of course).  John Harrison wrote HOL Light
>> (in ML, I guess), which is being used for the Flyspeck project (formal
>> proof Kepler's conjecture), which will be the biggest formal
>> mathematical proof ever done.
>> 
>> > Above paragraph is, sorry, mostly bullshit. Do you think I don't
>> > want to write software that works? Bullshit.
>> 
>> Heh, I'm mostly hassling Don for his ridiculous crap about Windows and
>> MS Office being such shining examples of valuable and successful
>> software.  It's a pretty sad day when someone aspires to programming
>> standards like that.
>
> As a user and owner of MS Office I think there is some

As a user and owner of TeX, I think that not having bugs is rather
important ;-).

Regards -- Markus
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87ejievr31.fsf@geddis.org>
Paul Rubin <·············@NOSPAM.invalid> wrote on 07 Aug 2007 00:1:
> Don Geddis <···@geddis.org> writes:
>> So let's start listing the world's most valuable or popular software
>> systems, and see if you can find any that used automatic program
>> verification in any significant way:
>> . MS Windows, Excel, Word, Powerpoint, IE
>
> Are you really saying your dynamic languages are going to lead to code
> as unreliable as this crap?  That is a pretty poor advertisement.

Try to stay on topic.  The question of interest was whether automatic program
verification (correctness proofs) are a methodology that helps create valuable
software.  My assertion is that it is not a tool that is used to create
valuable software.  I've attempted to list some valuable software off the top
of my head.  None of them seem to have been programmed with the assistance
of correctness proofs.

As for whether they are "crap", your criticism is uninteresting unless you
can point to superior software that solves the same problem.  Otherwise, the
most likely case is that you simply don't understand the problem, and have
radically underestimated how difficult it was to create those pieces of
software.

>> . Linux
>
> That is an interesting one, it's a very widely used program with
> published source, carefully studied by lots of smart people, running
> on 1000's of important production systems over the world for years and
> years and years, with fairly high apparent reliability (generally
> beating Windows even though it's harder to search for errors in a
> closed-source program).  Then Dawson Engler ran a bunch of static
> analysis tools and found HUNDREDS of bugs, some very serious,
> including in some of the most intensively tested and critical parts
> of the code, such as the packet filter
> (http://hci.stanford.edu/cstr/reports/2006-01.pdf p. 16).  What that
> shows is that static analysis finds bugs that testing utterly
> fails to find.

Yes, yes, super.

Except that you have given an example of the benefits of static type checking
(vs. testing), which is not the topic.

The topic we were discussing, and which my examples were directed towards,
is automatic program verification.

>> Perhaps you, instead, can list the most valuable software systems that WERE
>> verified.
>
> How about the VHDL in Intel and AMD microprocessors.

That's hardware, not software.  And they only verified a tiny, tiny, fraction
of the processor, not the whole processor.

> One little bug in the Pentium FDIV instruction and Intel took a $475
> million charge.  They are doing what they can to not let THAT happen again,
> which includes formal verification.

I agree that hardware processor verification is one of the very few examples
of even a tiny bit of real-world utility from that community.

Now, try to find an installed, popular software system that was verified
correct.

> Also the Javacard bytecode interpreter [...]
> So they went the verification route.

Really?  That would be interesting.  Is it true?  Do you have references?
I'd be surprised if they actually had a formal proof that the interpreter
is "correct".  What does the specification look like, for example?

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
If you ever have to steal money from your kid, and later on he discovers it's
gone, I think a good thing to do is to blame it on Santa Claus.
	-- Deep Thoughts, by Jack Handey
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xr6melrx9.fsf@ruckus.brouhaha.com>
Don Geddis <···@geddis.org> writes:
> Try to stay on topic.  The question of interest was whether
> automatic program verification (correctness proofs) are a
> methodology that helps create valuable software.  My assertion is
> that it is not a tool that is used to create valuable software.

Your idea of valuable software seems to be crappy, bug-ridden software
that still sometimes does useful things, which its vendors collect a
ton of cash for its successes while shifting the costs of its failures
onto other people.  Sorry, while avoiding responsibility for the
consequences of crap software might be worth studying as a business
technique, I don't see it as indicating good programming practices.

I suggested the Intel (post-FDIV) Pentium microcode, and Intel's
profits are doing fine, thank you.  See, the difference between Intel
and Microsoft is that when yet another virus-borne buffer overflow
exploit in Windows turns millions of PC's into zombies and causes a
huge disruption, loss of data, privacy breaches, etc, Microsoft
eventually puts out a patch and that's the end of their
responsibility.  When the FDIV bug occurred, even though it had almost
no real consequences for anyone, Intel had to eat the costs to the
tune of $475 million.  So Intel uses plenty of verification, to
prevent this type of thing from occurring more often.

The reason you don't see Microsoft using verification is because you
and I, rather than Bill G., are the ones who pay for the consequences
of Microsoft's bugs.  Bill G. gets the benefits while we get the costs.
Change the liability statutes so Microsoft has to pay for its own
bugs, and you'll see a lot more verification in use up there, just
like it's used at Intel.
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87r6meu2k7.fsf@geddis.org>
Paul Rubin <·············@NOSPAM.invalid> wrote on 07 Aug 2007 18:4:
> Don Geddis <···@geddis.org> writes:
>> Try to stay on topic.  The question of interest was whether
>> automatic program verification (correctness proofs) are a
>> methodology that helps create valuable software.  My assertion is
>> that it is not a tool that is used to create valuable software.
>
> Your idea of valuable software seems to be crappy, bug-ridden software
> that still sometimes does useful things, which its vendors collect a
> ton of cash for its successes

Sure, usually revenues can be a close analogy to value.  But if you want some
other definition of valuable, go ahead and propose it.  I don't care.
Automatic program verification simply isn't part of the general methodology
for software development, anywhere (outside small parts of academia).

> I suggested the Intel (post-FDIV) Pentium microcode

Yes, but that's hardware, not software.  And it's hardly the whole chip that
was verified, just a tiny piece.

But sure, I know you're trying.

> See, the difference between Intel and Microsoft is that [...]
> The reason you don't see Microsoft using verification [...]

You're apparently so full of rage at Microsoft, that you've lost track of the
topic.  I'm not a huge fan of Microsoft myself, but I happen to disagree with
you on this particular point.  Still, it's a distraction, and we shouldn't let
that get in the way of your primary failing.

Which is your assertion that automatic program verification is a useful
methodology for real-world software programming.  (And your related assertion
that programming languages with static typing are useful BECAUSE they make
the correctness proofs easier for program verification.)

You haven't offered up any definitions yourself, so I'm going to have to try
to guess what you might find valuable.  You're such a raving loon about
Microsoft, it seems likely that you're an open source fan.  So how about:
. Emacs (editor)
. Linux (OS)
. Apache (web server)
. Firefox (web browser)
. Perl (programming language)
. Python (programming language)
. Gimp (image/drawing)

All are generally recognized as very high quality pieces of software, with
extremely useful functionality.  They're also very popular, installed on
millions of PCs.

None of them used even a whiff of automatic program verification during their
construction.

Can you find software, of a similar (or better!) class in quality and
popularity, that DID take advantage of automatic program verification?

Or could it possibly be that you've oversold the benefit of those tools?

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
Poor planning on your part does not constitute an emergency on mine.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xlkcmzkvj.fsf@ruckus.brouhaha.com>
Don Geddis <···@geddis.org> writes:
> Can you find software, of a similar (or better!) class in quality and
> popularity, that DID take advantage of automatic program verification?

I told you, Javacard bytecode interpreter, running on at least 10
million computers (embedded in credit cards).  How many computers do
you think Oracle is running on?
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <878x8m2cse.fsf@geddis.org>
Paul Rubin <·············@NOSPAM.invalid> wrote on 07 Aug 2007 21:5:
> Don Geddis <···@geddis.org> writes:
>> Can you find software, of a similar (or better!) class in quality and
>> popularity, that DID take advantage of automatic program verification?
>
> I told you, Javacard bytecode interpreter

If true, that would be interesting.  But at the moment, I don't believe you.

When you posted this previously, I asked for documentation of your claim.
Can you show me evidence anywhere that Javacard has been "proven correct"
via automatic (or mixed auto/manual) methods?  What does the specification
look like, to which the code has been proven equivalent?

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
Those who will not reason, perish in the act.
Those who will not act, perish for that reason.
	-- W. H. Auden, _Shorts_
From: Daniel C. Wang
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46BA9A76.8090302@gmail.com>
Don Geddis wrote:
> Paul Rubin <·············@NOSPAM.invalid> wrote on 07 Aug 2007 21:5:
>> Don Geddis <···@geddis.org> writes:
>>> Can you find software, of a similar (or better!) class in quality and
>>> popularity, that DID take advantage of automatic program verification?
>> I told you, Javacard bytecode interpreter
> 
> If true, that would be interesting.  But at the moment, I don't believe you.

I'm not sure this is what paul had in mind or not.
  http://www.trusted-logic.com/publications.html
I know a lot of systems used in crypto/security have been proven 
correct. Spark ADA comes to mind.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xy7glmfoa.fsf@ruckus.brouhaha.com>
"Daniel C. Wang" <·········@gmail.com> writes:
> >> I told you, Javacard bytecode interpreter
> > If true, that would be interesting.  But at the moment, I don't
> > believe you.
> I'm not sure this is what paul had in mind or not.
>   http://www.trusted-logic.com/publications.html
> I know a lot of systems used in crypto/security have been proven
> correct. Spark ADA comes to mind.

I think Don is right and I got confused about this.  There are quite a
few mechanical verification results related to Javacard but I thought
I'd read somewhere that the actual bytecode interpreter had itself
been verified, and I'm not finding a reference to that.  The bytecode
verifier (i.e. the part of the class loader that analyzes user applets
before running them) has apparently been verified in Isabelle/HOL.
This is maybe one of the most sensitive parts of the software since it
deals with ensuring the safe sandboxing of potentially hostile
applets:

   http://tumb1.biblio.tu-muenchen.de/publ/diss/in/2003/klein.pdf

(the first few pages of this thesis are in German but the file is in
English).  Also interesting:

 Verified Bytecode Verification and Type-Certifying Compilation
(http://www.irit.fr/~Martin.Strecker/Publications/esmart02.pdf)
also using Isabelle.

"Formalising the Safety of Java, the Java Virtual Machine and Java
Card" (http://www.ecs.soton.ac.uk/~lavm/papers/acmcs.pdf) has table on
pages 9 and 17 of stuff done up til around 2000.  Despite its age this
looks like a good survey paper and I'll put it on my endless reading
list.

The search I used for all these was

  http://www.google.com/search?q=javacard+verified

which gets quite a few interesting hits.  It is obvious that this
field is moving forward fast, and those who ignore it will only fall
further and further behind.  Thanks for making me look.

Btw, according to <http://java.sun.com/javaone/sf/pavilion/pods.jsp>
Javacard has been deployed on 2.5 billion cards!  Apparently it is
used in GSM SIM cards in addition to credit cards, which explains
that very large number.
From: Daniel C. Wang
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46B9AD6A.7080307@gmail.com>
Don Geddis wrote:
{stuff deleted}
> You haven't offered up any definitions yourself, so I'm going to have to try
> to guess what you might find valuable.  You're such a raving loon about
> Microsoft, it seems likely that you're an open source fan.  So how about:
> . Emacs (editor)
> . Linux (OS)
> . Apache (web server)
> . Firefox (web browser)
> . Perl (programming language)
> . Python (programming language)
> . Gimp (image/drawing)
> 
> All are generally recognized as very high quality pieces of software, with
> extremely useful functionality.  They're also very popular, installed on
> millions of PCs.
> 
> None of them used even a whiff of automatic program verification during their
> constructions

Please see

http://en.wikipedia.org/wiki/List_of_tools_for_static_code_analysis
http://en.wikipedia.org/wiki/Sparse (written by Linus himself)
http://www.microsoft.com/whdc/devtools/tools/sdv.mspx (used by microsoft 
and made available to 3rd party developers)

There is also Coverity which got a government contract to find bugs 
statically in open source projects (http://scan.coverity.com/one-year.html)
http://en.wikipedia.org/wiki/Coverity

I'd say at least a "whiff" used in their ongoing maintenance.

BTW static types play a non-trivial role in many of these tools, as they 
can be used to improve the accuracy of the alias analysis. I suspect 
these tools would not be as effective otherwise.
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <874pja2cfq.fsf@geddis.org>
"Daniel C. Wang" <·········@gmail.com> wrote on Wed, 08 Aug 2007:
> Don Geddis wrote:
>> All are generally recognized as very high quality pieces of software, with
>> extremely useful functionality.  They're also very popular, installed on
>> millions of PCs.  None of them used even a whiff of automatic program
>> verification during their constructions
>
> Please see
> http://en.wikipedia.org/wiki/List_of_tools_for_static_code_analysis
> http://en.wikipedia.org/wiki/Sparse (written by Linus himself)
> http://www.microsoft.com/whdc/devtools/tools/sdv.mspx
> http://en.wikipedia.org/wiki/Coverity
> I'd say at least a "whiff" used in their own going maintenance.

These are all tools to examine source code statically, and attempt to
discover possible bugs.  Just like static type checking is such a tool.
Those are all great things, as far as they go.

But they don't address the topic in this subthread.  The question of interest
is "automatic program verification".  Correctness proofs.

Like, for example, if you implemented a quicksort algorithm in some
programming language, you might separately write a specification that:
the code will terminate; the output will be in sorted order; etc.
And then you'd somehow need to prove (automatically, if possible), that
any execution of the source code will necessarily satisfy the requested
specification.

That is automatic program verification.  That is a much more interesting and
difficult topic than the mere "static code analysis" tools you've suggested.

The reason this topic came up, is that in previous posts on this thread some
static typing fans have asserted that statically typed languages are good
BECAUSE they enable automatic program verification.  Not that the resulting
code will have fewer bugs; but that it will actually be correct!

I have been attempting to show that correctness proofs in automatic program
verification have made almost no impact on software development worldwide,
so the fact that static typing helps with correctness proofs is not relevant
to a question of whether a programmer ought to use a language with static
or dynamic typing.

Your examples aren't helpful in resolving this (minor) point.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
How can you tell when sour cream goes bad?
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xd4xuyaqs.fsf@ruckus.brouhaha.com>
Don Geddis <···@geddis.org> writes:
> But they don't address the topic in this subthread.  The question of interest
> is "automatic program verification".  Correctness proofs.
> 
> Like, for example, if you implemented a quicksort algorithm in some
> programming language, you might separately write a specification that:
> the code will terminate; the output will be in sorted order; etc.
> And then you'd somehow need to prove (automatically, if possible), that
> any execution of the source code will necessarily satisfy the requested
> specification.

Sure.  You could go even further, and specify and prove that the
maximum recursion depth should be no greater than ceil(1+ log2(N))
which will of course require you to organize the quicksort code the
right way to ensure this (not complicated, but many implementations
overlook it).  If you can do that, great.  If not, you haven't done
everything you might desire, but you've still got something useful.

You could also go less far, by statically verifying something less
than that the output will be in sorted order, but only (say) that the
output will be a list of the same type signature as the input (you
could do that with an annotation) or at any rate that the output will
have the same signature as any other function that it's passed to.
Once again, that might not be everything you might desire, but it
still is useful.

As for proving a complex semantic specification by type checking, that
is done a bit differently, via the Curry-Howard correspondence.  If I
understand it correctly, you write the specification as a logical
implication, then constructively prove the implication using a proof
assistant, which checks the steps and fills in details.  If you do
this with Coq, then Coq can then transform your proof into compileable
code in your choice of several languages.  So rather than writing code
and then proving it meets a spec, you write a proof and then the proof
assistant generates the code corresponding to the proof, as an
instance of the type you defined as your program spec.

  http://www.cs.berkeley.edu/~adamc/papers/CtpcPLDI07/CtpcPLDI07Talk.pdf

is a url to some pdf slides about a talk about a compiler (lambda
calculus to machine code) written with Coq.  It is about 5600 LOC of
which about 5000 are certified.  I'm not sure what the issue is with
the other 600.  I haven't yet looked at the code or any papers about
it (they are on my endless reading list).  

The larger message that I'm getting from this is that verified
software construction is no longer an insanely expensive concept
that's multiple orders of magnitude more difficult and expensive than
writing programs the normal way and hoping for the best.  It's maybe
one OOM more difficult and expensive or at least (with luck) heading
that way.  That is a manageable level for the critical components of
many development projects.

There is a book about Coq (called Coq'art) that I'm thinking of
sending away for.  It's gotten good reviews, and that such a book has
been published at all means the methods are deemed ready for use by a
significant sized audience.
From: Andrew Reilly
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <pan.2007.08.08.03.25.08.81855@areilly.bpc-users.org>
On Tue, 07 Aug 2007 18:46:10 -0700, Paul Rubin wrote:

> Change the liability statutes so Microsoft has to pay for its own
> bugs, and you'll see a lot more verification in use up there, just
> like it's used at Intel.

Or like Ariane-5, perhaps?  (Specific significance to the discussion:
there was a historic and expensive failure of strongly typed code (Ada)
that had been designed and implemented with all of the rigour available,
for which the *specification* was in error: it did not match the physical
world in which it had to operate.)

Not that a dynamic language would have helped in that case (but
non-overflowing integers might well have). ML-style typing might even have
helped flag the problem.  The point here is just what Ron has been saying
(I believe): prove what you can if you want to, but you can't get away
from testing, because your spec or your assumptions have just as much
chance of being "wrong" as your code.  Maybe more.

Cheers,

-- 
Andrew
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xy7gmznmw.fsf@ruckus.brouhaha.com>
Andrew Reilly <···············@areilly.bpc-users.org> writes:
> Or like Ariane-5, perhaps?  (Specific significance to the discusssion:
> there was a historic and expensive failure of strongly typed code (Ada)
> that had been designed and implemented with all of the rigour available,
> for which the *specification* was in error: it did not match the physical
> world in which it had to operate.)

From <http://en.wikipedia.org/wiki/Ariane_5_Flight_501>:

    Because of the different flight path, a data conversion from a 64-bit
    floating point to 16-bit signed integer value caused a hardware
    exception (more specifically, an arithmetic overflow, as the floating
    point number had a value too large to be represented by a 16-bit
    signed integer). Efficiency considerations had led to the disabling of
    the software handler (in Ada code) for this error trap, although other
    conversions of comparable variables in the code remained
    protected. This led to a cascade of problems, culminating in
    destruction of the entire flight.

Sounds like they turned off the exception handler which broke the
program semantics, sort of like Lisp with runtime type-checking turned
off loses all its type safety.  And Lisp code runs that way all the
time.

From further down in the same article:

    Flight 501's high profile disaster brought the high risks associated
    with complex computing systems to the attention of the general public,
    politicians, and executives, resulting in increased support for
    research on ensuring the reliability of safety-critical systems. The
    subsequent automated analysis of the Ariane code was the first example
    of large-scale static code analysis by abstract interpretation.

So part of the problem was that they didn't have ENOUGH static analysis.
From: Andrew Reilly
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <pan.2007.08.08.03.48.01.508797@areilly.bpc-users.org>
On Wed, 08 Aug 2007 13:25:11 +1000, Andrew Reilly wrote:
> Or like Ariane-5, perhaps?

Actually, while googling for details on the Ariane disaster, just now, I
came across this page of numerically-induced disasters:
http://www.ima.umn.edu/~arnold/disasters/disasters.html

While the Ariane-5 fault that I mentioned is one that could plausibly have
been caught with the aid of better compile-time type checking, I doubt
that the other two (Patriot Missile timekeeping failure, Sleipner A
offshore oil platform sinking) could.  Both of these were the result of
accumulated finite-precision rounding error.  Since all of the languages
under discussion use essentially the same floating point arithmetic
systems, they would very likely have behaved similarly in these
conditions.  Numerical analysis at a much higher level than the program
code, and/or more thorough testing and calibration are the only way to
catch these sorts of faults.

No, don't come back with "interval arithmetic"...

-- 
Andrew
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xtzrazn6g.fsf@ruckus.brouhaha.com>
Andrew Reilly <···············@areilly.bpc-users.org> writes:
> While the Ariane-5 fault that I mentioned is one that could plausibly have
> been caught with the aid of better compile-time type checking, I doubt
> that the other two (Patriot Missile timekeeping failure, Sleipner A
> offshore oil platform sinking) could.  Both of these were the result of
> accumulated finite-precision rounding error.  

From your link:

    Specifically, the time in tenths of second as measured by the system's
    internal clock was multiplied by 1/10 to produce the time in
    seconds. This calculation was performed using a 24 bit fixed point
    register. In particular, the value 1/10, which has a non-terminating
    binary expansion, was chopped at 24 bits after the radix point.

> Since all of the languages under discussion use essentially the same
> floating point arithmetic systems, they would very likely have behaved
> similarly in these conditions.

WHICH languages under discussion use 24 bit fixed point arithmetic?

> Numerical analysis at a much higher level than the program
> code, and/or more thorough testing and calibration are the only way to
> catch these sorts of faults.
> No, don't come back with "interval arithmetic"...

They almost certainly would have been fine with regular IEEE 64-bit
double precision, whose error propagation characteristics are very
well studied, and could be bounded by static analysis of the equations
in a problem like this.  So it looks to me like they didn't do enough
analysis.  That was in 1991 and they're more paranoid about things
like that now that it's 16 years later.
From: Andrew Reilly
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <pan.2007.08.08.06.59.26.511411@areilly.bpc-users.org>
On Tue, 07 Aug 2007 21:04:55 -0700, Paul Rubin wrote:

> Andrew Reilly <···············@areilly.bpc-users.org> writes:
>> While the Ariane-5 fault that I mentioned is one that could plausibly have
>> been caught with the aid of better compile-time type checking, I doubt
>> that the other two (Patriot Missile timekeeping failure, Sleipner A
>> offshore oil platform sinking) could.  Both of these were the result of
>> accumulated finite-precision rounding error.  
> 
> From your link:
> 
>     Specifically, the time in tenths of second as measured by the system's
>     internal clock was multiplied by 1/10 to produce the time in
>     seconds. This calculation was performed using a 24 bit fixed point
>     register. In particular, the value 1/10, which has a non-terminating
>     binary expansion, was chopped at 24 bits after the radix point.
> 
>> Since all of the languages under discussion use essentially the same
>> floating point arithmetic systems, they would very likely have behaved
>> similarly in these conditions.
> 
> WHICH languages under discussion use 24 bit fixed point arithmetic?

None, so?  The source of the error was accumulation of rounding errors on
limited precision registers.  Floating point has exactly the same
behaviour, for the same inescapable reasons.  Single-precision IEEE
floating point only has 25 bits of effective precision, so the issue is
certainly comparable with code that could have been written in lisp or ml
or java, and it would still have failed, in the same way, despite any
amount of static type checking.

>> Numerical analysis at a much higher level than the program
>> code, and/or more thorough testing and calibration are the only way to
>> catch these sorts of faults.
>> No, don't come back with "interval arithmetic"...
> 
> They almost certainly would have been fine with regular IEEE 64-bit
> double precision, whose error propagation characteristics are very
> well studied, and could be bounded by static analysis of the equations
> in a problem like this.  So it looks to me like they didn't do enough
> analysis.  That was in 1991 and they're more paranoid about things
> like that now that it's 16 years later.

Think about ill-conditioned matrices and iterative solutions.  You can get
garbage out of such analyses very easily, irrespective of how many bits
you use in your floating point representation, and no matter how type
correct the solvers. The issue is one of numerical analysis and
conditioning, not one of data typing. Yes, as you said, they didn't do
enough [numerical] analysis. Static type or program analysis could not
find this [class of] error.

-- 
Andrew
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xps1yo5lq.fsf@ruckus.brouhaha.com>
Andrew Reilly <···············@areilly.bpc-users.org> writes:
> > WHICH languages under discussion use 24 bit fixed point arithmetic?
> None, so?  The source of the error was accumulation of rounding errors on
> limited precision registers.  Floating point has exactly the same
> behaviour, for the same inescapable reasons.  Single-precision IEEE
> floating point only has 25 bits of effective precision, 

That page says:

   In other words, the binary expansion of 1/10 is
   0.0001100110011001100110011001100.... Now the 24 bit register in
   the Patriot stored instead 0.00011001100110011001100 ...

See, the leading 3 bits are zero, because it's a fixed point register.
IEEE single precision floating point is normalized so that the leading
bit is always 1, then it doesn't store that bit (hidden bit
representation).  So you get 4 extra bits of precision, though there's
just a 23 bit mantissa, so you have to give one back.  That still
leaves the error lower by a factor of 8, which may have been enough.
The rounding would work differently too--whether better or worse, I
don't know.  It's intricately designed so that adding quotients of
small integers (like maybe 1/10) comes out more accurate than you
might expect.

> so the issue is certainly comparable with code that could have been
> written in lisp or ml or java, and it would still have failed, in
> the same way, despite any amount of static type checking.

Right, they need more static analysis of the numerics if they want to
use single precision.  But as a certain well-known numerics prof likes
to say, the answer is to use double precision.
From: Daniel C. Wang
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46B9B12D.3030603@gmail.com>
Andrew Reilly wrote:
{stuff trimmed}
>  Static type or prorgam analysis could not
> find this [class of] error.
> 

See
http://www.di.ens.fr/~cousot/COUSOTpapers/EMSOFT07.shtml
http://www.astree.ens.fr/

and explain to me why their framework can not be extended to reasoning 
about fix point arithmetic.

If you let programmers provide hints to static analysis tools, the 
tools degenerate into proof checkers at which point there really is no 
property that can't be verified, albeit in a tedious manner.

Dynamic types or testing is never going to tell me my program is 
dead-lock free or always performs some action in the future (liveness). 
Liveness properties can't be encoded in any runtime framework.
From: Andrew Reilly
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <pan.2007.08.08.04.00.22.914488@areilly.bpc-users.org>
On Wed, 08 Aug 2007 03:45:34 +0000, Stefan Ram wrote:

> Andrew Reilly <···············@areilly.bpc-users.org> writes:
>>(I believe): prove what you can if you want to, but you can't get away
>>from testing, because your spec or your assumptions have just as much
>>chance of being "wrong" as your code.  Maybe more.
> 
>   If you test code against its specification,
>   you would not detect wrong specifications.

Duh!

-- 
Andrew
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <0hejif2w18.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:

> Gosh, I'm coming up blank here.  None of them were created with such a tool.
>
> Perhaps you, instead, can list the most valuable software systems that WERE
> verified.

Well, the software controlling aircraft and trains I'm travelling in,
the software controlling the medical monitor when I'm in hospital and
the software controlling my airbag.

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bjddko9rkaca3@corp.supernews.com>
Don Geddis wrote:
> So let's start listing the world's most valuable or popular software
> systems, and see if you can find any that used automatic program
> verification in any significant way:
> . MS Windows, Excel, Word, Powerpoint, IE
> . Oracle database
> . SAP
> . Netscape/Firefox
> . Linux
> . Mac OSX, iTunes, iPhoto, iDVD
> . FAA airport air traffic control
> . IRS tax systems
> . Hedge fund automatic program equity trading
> 
> Gosh, I'm coming up blank here.

I know people at Microsoft Research who work full time on the static
verification of Windows (particularly drivers) and I know at least one
person who has done similar work on the Linux kernel.

A quick Google for FAA and static verification indicates that they use it
extensively, etc.

Might I suggest that you are "coming up blank" because you're not looking
rather than because it does not exist?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Daniel C. Wang
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46B9B915.7030801@gmail.com>
Jon Harrop wrote:
{stuff deleted}
> I know people at Microsoft Research who work full time on the static
> verification of Windows (particularly drivers) and I know at least one
> person who has done similar work on the Linux kernel.

I know several more full time employees in the product divisions whose 
full time job description is static analysis for windows code and the 
code in office, not to mention various shipping tools like prefast and 
fxcop. Static analysis has escaped the lab. Mainly, because testing has 
its limits. There were persistent quality issues that simply couldn't be 
solved by hiring more testers or better developers.

Some of these quality issues were solved by moving to better language 
frameworks like .NET. These quality issues weren't solved by macros, or 
dynamic typing! Though, I personally think GC is a big win. I often 
think LISPers overestimate the value of LISP because they underestimate 
the productivity benefits of a GC.

P.S. Full disclosure despite the email address, I am an MS employee, 
<include standard disclaimer>
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xy7gmci2i.fsf@ruckus.brouhaha.com>
"Daniel C. Wang" <·········@gmail.com> writes:
> I often LISPers over estimate the value of LISP because they
> underestimate the productivity benefits of a GC.

That is a good point, though Java is still awful.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-577D57.15184808082007@news-europe.giganews.com>
In article <················@gmail.com>,
 "Daniel C. Wang" <·········@gmail.com> wrote:

> Jon Harrop wrote:
> {stuff deleted}
> > I know people at Microsoft Research who work full time on the static
> > verification of Windows (particularly drivers) and I know at least one
> > person who has done similar work on the Linux kernel.
> 
> I know several more full time employees in the product divisions whose 
> full time job description is static analysis for windows code and the 
> code in office, not to mention various shipping tools like prefast and 
> fxcop. Static analysis has escaped the lab. Mainly, because testing has 
> its limits. There were persistent quality issues that simple couldn't be 
> solved by hiring more testers or better developers.

Oh, for sure Microsoft's software needs more than testing
to be improved.

But: it's not that not enough bugs have been reported. There
are bug reports from customers overflowing the forums.

Customers have been complaining about MS product quality for years.
THEY seem to be pretty
good at testing these products. Instead of improving
the quality of the products, Microsoft has year over year
added more obscure features to their products.
The whole focus is not on quality. It is on features
and getting money from customers via more updates and
versions and upgrades and bundles and and and...

MS has still a long way out of their great software quality
debacle (and this includes performance, UI, ease of use
and above all security).

And no, dynamic typing and Lisp will not be the solution
to their problems. But I'm totally sure that (more) intensive
testing would make a huge contribution. Microsoft
actually has to address these problems.

Microsoft doesn't use much Lisp. So that's no help. But it
uses VB and other stuff that needs to be fixed. Plus
the biggest thing is: reduce complexity. More complex
tools means more bugs, more problems, less experts, longer
time to fix and so on. So I doubt that adding more and
more to C# and others will help.

> Some of these quality issues were solved by moving to better language 
> frameworks like .NET. These quality issues weren't solved by macros, or 
> dynamic typing! Though, I personally think GC is a big win. I often 
> LISPers over estimate the value of LISP because they underestimate the 
> productivity benefits of a GC.
> 
> P.S. Full disclosure despite the email address, I am an MS employee, 
> <include standard disclaimer>

-- 
http://lispm.dyndns.org
From: Daniel C. Wang
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46B9E877.1070803@gmail.com>
Rainer Joswig wrote:
{stuff deleted}
> Oh, for sure Microsoft's software needs more than testing
> to be improved.

Testing cost money. There's been a fixed amount of money allocated for 
it. Maybe they should allocate more. For some class of quality bugs, 
Microsoft thought it was more cost effective to use static analysis.

see
http://research.microsoft.com/research/pubs/view.aspx?msr_tr_id=MSR-TR-2005-139&0sr=p

Most importantly, testing can not give you full coverage. For some 
properties i.e. your code is free from buffer overruns, your code is free 
from deadlocks, your code follows the right device driver protocols... 
etc I don't think there is a testing methodology that is more cost 
effective than static approaches.

Of course for some other properties it is more cost effective to test. 
i.e. the user scenario works end to end.

I personally know of at least two bugs that were never reported by any 
customer that got *fixed* in Vista because of static tools. At least one 
of these bugs caused the developers to add a more exhaustive test case, 
because they realized there was a "hole" in their test suite.

The other was such a corner case (deadlock occurs when a maintenance 
thread spawned at midnight gets a bad packet) that it was impossible to 
update the test suite. There are actually tons more, but I am only 
personally aware of two.

There are also dynamic/static hybrid techniques which improve the cost 
effectiveness of testing for certain properties
http://research.microsoft.com/research/pubs/view.aspx?type=Technical%20Report&id=1300

{stuff deleted}
 > Instead of improving
 > the quality of the products, Microsoft has year over year
 > added more obscure features to their products.
 > The whole focus is not on quality. It is on features
 > and getting money from customers via more updates and
 > versions and upgrades and bundles and and and...

Vista gets dinged in the press for not offering enough new features.
   http://en.wikipedia.org/wiki/Development_of_Windows_Vista
Provides some insight on what's going on in Microsoft today. Also, it's 
not as if other companies producing consumer products are really that much 
better in terms of buffer overruns and security issues.
Everyone still issues patches for security issues some of them just less 
frequently. There are limits to what testing and traditional development 
techniques can buy you. "Dynamisms" is only useful up to a point.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-BD44C2.19005608082007@news-europe.giganews.com>
In article <················@gmail.com>,
 "Daniel C. Wang" <·········@gmail.com> wrote:

> Rainer Joswig wrote:
> {stuff deleted}
> > Oh, for sure Microsoft's software needs more than testing
> > to be improved.
> 
> Testing cost money. There's been a fixed amount of money allocated for 
> it. Maybe they should allocate more. For some class of quality bugs, 
> Microsoft though it was more cost effective to use static analysis.

Of course one should use static analysis. In projects I was
working on, we did that too - for Java. We developed a whole
suite of static analysis tools and generated reports.
In one project results were discussed between an architect
and the project management once a week on a regular basis.
We also used tools to design test data, tools to
optimize test coverage (for example to make it more cost effective)
and so on. We were using unit tests, fitnesse tests, Ui-based
testing, tools to analyze runtime behavior (memory), ...

> see
> http://research.microsoft.com/research/pubs/view.aspx?msr_tr_id=MSR-TR-2005-139&0sr=p
> 
> Most importantly, testing can not give you full coverage. For some 
> properties i.e. you code is free from buffer overruns, your code is free 
> from deadlocks, your code follows the right device driver protocols... 
> etc I don't think there is a testing methodology that is more cost 
> effective than static approaches.
> 
> Of course for some other properties it is more cost effective to test. 
> i.e. the user scenario works end to end.
> 
> I personally know of at least two bugs that were never reported by any 
> customer that got *fixed* in Vista because of static tools. At least one 
> of these bugs caused the developers to add a more exhaustive test case, 
> because they realized there was a "hole" in their test suite.
> 
> The other was such a corner case (deadlock occurs when a maintenance 
> thread spawned at midnight gets a bad packet) that it was impossible to 
> update the test suite. There are actually tons more, but I only 
> personally aware of two.
> 
> There are also dynamic/static hybrid techniques which improve the cost 
> effectiveness of testing for certain properties
> http://research.microsoft.com/research/pubs/view.aspx?type=Technical%20Report&id=1300
> 
> {stuff deleted}
>  > Instead of improving
>  > the quality of the products, Microsoft has year over year
>  > added more obscure features to their products.
>  > The whole focus is not on quality. It is on features
>  > and getting money from customers via more updates and
>  > versions and upgrades and bundles and and and...
> 
> Vista gets dinged in the press for not offering enough new features.

MS had to cut back Vista massively from what they initially were promising.
No wonder.

Check out MS Office on the PC. It is overflowing with features.
The new UI was especially developed so that the features are more
accessible. The new UI is still bloated.

>    http://en.wikipedia.org/wiki/Development_of_Windows_Vista
> Provides some insight on what's going on in Microsoft today. Also, it's 
> as if other companies producing consumer products are really that much 
> better in terms of buffer overruns and security issues.
> Everyone still issues patches for security issues some of them just less 
> frequently. There are limits to what testing and traditional development 
> techniques can buy you. "Dynamisms" is only useful up to a point.

The trivial buffer overrun security exploits would go away in Lisp
for starters.

I own a copy of MS Office for the Mac.

MS Word always makes problems when the document gets a bit more than
trivial. The rendering engine is full of bugs. Editing corrupts
documents (!!). I got documents people had used on the PC. The documents also had
corrupt contents. If MS had any sense for the customer, these problems
would be fixed before they develop things like that talking clippy.
Non-document-corrupting editing operations would be
exactly an area where I would use a combination of testing
and trying to prove the absence of certain errors.
If I would write it in Lisp, I would develop a special
layer for the document model with tons of assertions and
a large test suite. Adding also automated random testing.

I have extensively used MS Entourage. The current version
shows a database error that the MS supplied tool doesn't
fix. It detects a corruption, I'm repairing the database, yet
the error is still there. This is basic stuff. If my
data is not secure, then the product can have as many features
as possible, but I am migrating away from it.
Before that I was haunted by a bug where the Entourage database
was completely locked for several minutes every so often. I think
MS fixed that maybe after a year. Problem here? Just a guess: No
testing with real-world data in more than trivial size.

-- 
http://lispm.dyndns.org
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xodheeriw.fsf@ruckus.brouhaha.com>
Rainer Joswig <······@lisp.de> writes:
> And no, dynamic typing and Lisp will not be the solution to their
> [Microsoft's] problems. But I'm totally sure that (more) intensive
> testing would make a huge contribution. Microsoft actually has to
> address these problems.

Microsoft spends millions on testing and more testing would probably
help them even less than Lisp would help them.  A lot of Microsoft's
problems are so simple, they can be explained at a Sesame Street
level: these bugs were brought to you by the letter C.
From: Andrew Smallshaw
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <slrnfbuncm.at0.andrews@sdf.lonestar.org>
On 2007-08-11, Paul Rubin <http> wrote:
>
> Microsoft spends millions on testing and more testing would probably
> help them even less than Lisp would help them.  A lot of Microsoft's
> problems are so simple, they can be explained at a Sesame Street
> level: these bugs were brought to you by the letter C.

I've read reports that reviewed the Windows 2000 source code that
got leaked a few years back and the general impression was that it
is the slavish aim for universal compatibility that is Microsoft's
biggest problem: the code was full of instances where one particular
app expected something to happen, and so the OS bent over backwards
not to break it (we aren't always talking about major apps either).

This is a fundamental problem for Windows since its legacy incorporates
insecure 16 bit DOS and Windows, plenty of undocumented system
calls, and fundamental changes such as UNC paths whilst trying not
to break anything along the way.  

Unix differs partly because it was simply more scalable from the
outset - there was never any question of running out of drive
letters for instance - and also that programs are usually recompiled
for new platforms and tweaked for them, rather than expecting a
new platform to offer bug compatibility for the sake of a little-used
decades-old app.

-- 
Andrew Smallshaw
·······@sdf.lonestar.org
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186587377.431754.114200@k79g2000hse.googlegroups.com>
> Though, I personally think GC is a big win. I often
> LISPers over estimate the value of LISP because they underestimate the
> productivity benefits of a GC.

After thousands of lines of Java code, I still think that Java loses
against C++ much less Lisp, GC or no. I don't think C# is any better,
though I haven't written much code in it. It retains a lot of the
fundamentally brain-dead things that bug me about Java...
From: Daniel C. Wang
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46B9EB31.1040105@gmail.com>
Rayiner Hashem wrote:
>> Though, I personally think GC is a big win. I often
>> LISPers over estimate the value of LISP because they underestimate the
>> productivity benefits of a GC.
> 
> After thousands of lines of Java code, I still think that Java loses
> against C++ much less Lisp, GC or no. I don't think C# is any better,
> though I haven't written much code in it. It retains a lot of the
> fundamentally brain-dead things that bug me about Java...
> 

The C# treatment of generics is a little less brain dead than Java's. C# 
3.0 has quite a lot of new useful features

   http://msdn2.microsoft.com/en-us/vcsharp/aa336745.aspx

Local type-inference, lambda, type-safe meta-programming, ... etc. C# 
has been much more aggressively adding useful features compared to Java.
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hlst2F3kp83cU1@mid.individual.net>
Don Geddis wrote:
> Paul Rubin <·············@NOSPAM.invalid> wrote on 04 Aug 2007 17:4:
>> [S]tatic languages are better at reducing the likelihood of long-tail
>> failures, to the point that if I write a sufficiently paranoid static
>> program with correctness proofs, you might not be able to develop a dynamic
>> counterpart even with 100x the expenditure, if its reliability is based
>> only on behavior tests instead of proofs.  Of course the "expenditure"
>> includes the cost of cleaning up after a possible plane crash or nuclear
>> meltdown caused by a software defect.  So I think the high-assurance crowd
>> will stay with static languages
> 
> Nice use of scare tactics, with your plane crash and nuclear horror.  Add in
> the deaths of some children, and maybe a Hitler reference, and I'm sure you'll
> have won the argument!
> 
> OK, let's get real here.  Are you seriously so naive as to believe that
> compile-time type safety proofs are identical with proofs of program
> correctness?  You do realize, I hope, that there's a whole subfield of
> computer science involved with proving programs correct.  And that this is
> a _different_ field than the one interested in static type checking in
> programming languages.
> 
> Having source code pass a static type checker indeed eliminates a particular
> class of possible bugs. 

As an absolute statement, not even that is correct: There are certainly 
runtime errors that cannot be caught by static type systems, and 
likewise, there are static type errors which are not necessarily runtime 
errors. So _in the general case_, there is not necessarily a correlation 
between runtime errors and static type errors.

In the general case, programmers have to work for creating such a 
correlation. The quality of good static type systems is that this 
correlation is easy to create, or most of the time just there without a 
conscious effort. Nevertheless, keeping this correlation in mind 
requires a working style that is not suitable for everyone.

If I understand correctly, people who prefer static type systems tend to 
think in terms of structural invariants of their programs, while people 
who prefer dynamic type systems tend to think in terms of their 
programs' behavior. I am convinced this is a personal preference, 
nothing more, nothing less. I guess that static typers have equal 
problems developing in dynamically typed languages as dynamic typers 
have developing in statically typed languages. I don't see a problem 
here: If the requirements are such that a focus on static correctness is 
important, it's probably better to employ a static typer, whereas if the 
requirements are such that dynamic flexibility is more important, the 
job should better be done by a dynamic typer. There is nothing wrong 
with specialization, we don't need a grand unified theory of program 
development.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ph8x8oir1p.fsf@hod.lan.m-e-leypold.de>
Pascal Costanza wrote:

> If I understand correctly, people who prefer static type systems tend
> to think in terms of structural invariants of their programs, while
> people who prefer dynamic type systems tend to think in terms of their
> programs' behavior. 

There might be some truth in that: People preferring statical typing
develop their programs from their data not from the sequences of
operations performed (behaviour).

Regards -- Markus
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5hrr8sF3m04h0U3@mid.individual.net>
Markus E.L. 2 wrote:
> Pascal Costanza wrote:
> 
>> If I understand correctly, people who prefer static type systems tend
>> to think in terms of structural invariants of their programs, while
>> people who prefer dynamic type systems tend to think in terms of their
>> programs' behavior. 
> 
> There might be some truth in that: People preferring statical typing
> develop their programs from their data not from the sequences of
> operations performed (behaviour).

...and since programs are data...

But let's not go that path... ;-)


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bkapoggu8oge8@corp.supernews.com>
Pascal Costanza wrote:
> Don Geddis wrote:
>> Having source code pass a static type checker indeed eliminates a
>> particular class of possible bugs.
> 
> As an absolute statement, not even that is correct...

Yes it is.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5huqjlF3lruu7U1@mid.individual.net>
Jon Harrop wrote:
> Pascal Costanza wrote:
>> Don Geddis wrote:
>>> Having source code pass a static type checker indeed eliminates a
>>> particular class of possible bugs.
>> As an absolute statement, not even that is correct...
> 
> Yes it is.

M: Oh look, this isn't an argument.
A: Yes it is.
M: No it isn't. It's just contradiction.
A: No it isn't.
M: It is!
A: It is not.
M: Look, you just contradicted me.
A: I did not.
M: Oh you did!!
A: No, no, no.
M: You did just then.
A: Nonsense!
M: Oh, this is futile!
A: No it isn't.
M: I came here for a good argument.
A: No you didn't; no, you came here for an argument.
M: An argument isn't just contradiction.
A: It can be.
M: No it can't. An argument is a connected series of statements intended 
to establish a proposition.
A: No it isn't.
M: Yes it is! It's not just contradiction.
A: Look, if I argue with you, I must take up a contrary position.
M: Yes, but that's not just saying 'No it isn't.'
A: Yes it is!
M: No it isn't!

A: Yes it is!
M: Argument is an intellectual process. Contradiction is just the 
automatic gainsaying of any statement the other person makes.

A: No it isn't.
M: It is.
A: Not at all.
M: Now look.
A: (Rings bell)  Good Morning.
M: What?
A: That's it. Good morning.
M: I was just getting interested.
A: Sorry, the five minutes is up.
M: That was never five minutes!
A: I'm afraid it was.
M: It wasn't.

A: I'm sorry, but I'm not allowed to argue anymore.


-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bmlqaerp2gmf4@corp.supernews.com>
Pascal Costanza wrote:
> Jon Harrop wrote:
>> Pascal Costanza wrote:
>>> Don Geddis wrote:
>>>> Having source code pass a static type checker indeed eliminates a
>>>> particular class of possible bugs.
>>> As an absolute statement, not even that is correct...
>> 
>> Yes it is.
>
> ...
> M: Argument is an intellectual process. Contradiction is just the
> automatic gainsaying of any statement the other person makes.
> ...

Pot, kettle.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Duane Rettig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <o0k5s4tuds.fsf@gemini.franz.com>
Jon Harrop <···@ffconsultancy.com> writes:

> Pascal Costanza wrote:
>> Jon Harrop wrote:
>>> Pascal Costanza wrote:
>>>> Don Geddis wrote:
>>>>> Having source code pass a static type checker indeed eliminates a
>>>>> particular class of possible bugs.
>>>> As an absolute statement, not even that is correct...
>>> 
>>> Yes it is.
>>
>> ...
>> M: Argument is an intellectual process. Contradiction is just the
>> automatic gainsaying of any statement the other person makes.
>> ...
>
> Pot, kettle.

Jon,

If you truly want to be taken more seriously in the newsgroup that
you're cross-posting to (c.l.l.), I advise you to take yourself a
little less seriously.  This is the fourth or fifth time I recall
you using a tired old snub (in this case "Well, isn't that the pot
calling the kettle black?") and it seems you can't even be bothered to
provide the whole saying; you're using the standard usenet cliche of
forcing your readers to make the completion so that you don't have to
do so.

If you really want to grab our attention, be creative; Give Edmund
Rostand's "Cyrano de Bergerac" a read, and pay special attention to
the lesson that Cyrano teaches to the lieutenant (I think) in the
subtle art of insult, just before he kills him in the swordfight that
ensues.  The swordfight is serious enough, but Cyrano teaches the
lesson of how to cower his opponents by using creative sarcasm.  There
are probably quite a few insults you can use almost directly there,
too.

Pascal has gone the extra mile on this; you should at least make an
effort... 

-- 
Duane Rettig    ·····@franz.com    Franz Inc.  http://www.franz.com/
555 12th St., Suite 1450               http://www.555citycenter.com/
Oakland, Ca. 94607        Phone: (510) 452-2000; Fax: (510) 452-0182   
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bmv2d182hpca3@corp.supernews.com>
Duane Rettig wrote:
> Pascal has gone the extra mile on this...

Pascal incorrectly stated that Don was incorrect without justification. Then
he started a rant about my not justifying myself. I'd hardly call spreading
misinformation "going the extra mile".

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Duane Rettig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <o0fy2stmzp.fsf@gemini.franz.com>
Jon Harrop <···@ffconsultancy.com> writes:

> Duane Rettig wrote:
>> Pascal has gone the extra mile on this...
>
> Pascal incorrectly stated that Don was incorrect without justification. Then
> he started a rant about my not justifying myself.

You really don't know how to lighten up, do you?

Well, let's try again.  Take the statement you just wrote (which
sort-of brings it back to a technical discussion, as is appropriate),
and compare it with your previous statement:

>> Pot, kettle.

Can you see the obvious disconnect?  Your previous statement was a
weak attempt at sarcasm and turning your nose up at Pascal, but
instead of laughing off my reply and just getting over it, you miss
the point entirely (though it is heartening to see that you try to
come back to the subject).  All I'm saying is that if you want to
attempt to blow someone away with your wit and sarcasm, be sure to
have some firepower to shoot with.

> I'd hardly call spreading misinformation "going the extra mile".

and you didn't see the sarcasm in my post, otherwise you would
have had the good humor to laugh it off; instead you replied
defensively about how bad Pascal has been.  I see this kind of "but
look at what the other person did" all the time - from my grandkids.

Lighten up, dude.

-- 
Duane Rettig    ·····@franz.com    Franz Inc.  http://www.franz.com/
555 12th St., Suite 1450               http://www.555citycenter.com/
Oakland, Ca. 94607        Phone: (510) 452-2000; Fax: (510) 452-0182   
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9gjne$49j$1@aioe.org>
Duane Rettig escreveu:
[snipped]

> 
> and you didn't see the sarcasm in my post, otherwise you would
> have had the good humor to laugh it off; instead you replied
> defensively about how bad Pascal has been.  I see this kind of "but
> look at what the other person did" all the time - from my grandkids.
> 

But what would you expect of a person frustrated to chase a flying frog. . .
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5i1csoF3mf0d7U1@mid.individual.net>
Jon Harrop wrote:
> Duane Rettig wrote:
>> Pascal has gone the extra mile on this...
> 
> Pascal incorrectly stated that Don was incorrect without justification. Then
> he started a rant about my not justifying myself. I'd hardly call spreading
> misinformation "going the extra mile".

I didn't expect a kind of Spanish Inquisition!


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186277835.548843.22750@z24g2000prh.googlegroups.com>
> for some A that varies from problem to problem.  There is a
> so-far-unresolved question about whether A in general tends to be
> greater than 1 or less than 1, and that's why these battles keep
> raging.  If A > 1 then dynamic can be said in some sense to "win" and
> vice versa.

I'm completely uninterested in debating whether A < 1 or A > 1, and if
that's what this thread has come down to I'm perfectly content to
leave it alone. What I think is patently ridiculous is the original
premise of this thread, that somehow dynamic typing is an obsolete
technology superseded by static typing.

> Well, it was Ingo that said that, and I think he was using a pretty
> sweeping definition of toy problems.

Yea, I think "toy" has an obvious-enough connotation that it's clear
he was just being a twit...
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <w98x8q7gn8.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> for some A that varies from problem to problem.  There is a
>> so-far-unresolved question about whether A in general tends to be
>> greater than 1 or less than 1, and that's why these battles keep
>> raging.  If A > 1 then dynamic can be said in some sense to "win" and
>> vice versa.
>
> I'm completely uninterested in debating whether A < 1 or A > 1, and if
> that's what this thread has come down to I'm perfectly content to
> leave it alone. What I think is patently ridiculous is the original
> premise of this thread, that somehow dynamic typing is an obsolete
> technology supersceded by static typing.

Really? The subject is still "shootout: implementing an interpreter
for a simple procedural language Minim".

Regards -- Markus
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-D28F5C.03132905082007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Rayiner Hashem <·······@gmail.com> writes:
> > I could just as easily say "anyway, sure, static typing has benefits,
> > I think we all agree on that. What's not so clear is whether the
> > benefits outweigh the costs on all that many problems".
> 
> I would say that if developing and using a dynamic program costs X
> then developing and using a corresponding static program might cost AX
> for some A that varies from problem to problem.  There is a
> so-far-unresolved question about whether A in general tends to be
> greater than 1 or less than 1, and that's why these battles keep
> raging.  If A > 1 then dynamic can be said in some sense to "win" and
> vice versa.
> 
> What I would say is there is good evidence that A is less than (say)
> 100.  Any real-world application that you can do in Lisp, I can
> probably do in ML if I spend 100x as much.  Would you dispute that?
> 
> What I don't think there's such good evidence for is that A is greater
> than 0.01.  By that I mean, static languages are better at reducing
> the likelihood of long-tail failures, to the point that if I write a
> sufficiently paranoid static program with correctness proofs, you
> might not be able to develop a dynamic counterpart even with 100x the
> expenditure, if its reliability is based only on behavior tests
> instead of proofs.  Of course the "expenditure" includes the cost of
> cleaning up after a possible plane crash or nuclear meltdown caused by
> a software defect.  So I think the high-assurance crowd will stay with
> static languages, by which I would include things like Lisp code
> certified with ACL2.

Unfortunately the maintenance of static software is extremely
costly, and changes to the software are extremely costly.
We are talking not only about bugs here, but mostly of changing
requirements. Even during the development of a project (say, 1 year)
easily 30% of the spec changes. Over the runtime
of, say, ten years, easily 90% of the spec can be changed/added/removed.

Maintenance costs in large systems are easily 90% of the
costs of the software.

> 
> > Since you have admitted that theoretical arguments are futile here,
> > since you can't prove your point theoretically, where is your
> > experimental evidence that dynamic languages are only suitable for
> > toy problems?
> 
> Well, it was Ingo that said that, and I think he was using a pretty
> sweeping definition of toy problems.  I might have a problem in which
> data corruption is intolerable but I can live with the program
> crashing with exception traces as long as it doesn't corrupt data.  So
> I'm fine writing stuff like that in Lisp or Python, with assertions
> all over the place that can fail at runtime.  I'm in a different
> situation if the runtime is not allowed to fail.
> 
> Anyway I used to be a C programmer and later switched to Lisp and
> Python, which I liked a lot better.  I'm a newbie to these more
> advanced static languages, so the interest that I've taken in them
> relies mostly on the impressions I get from their users who are much
> more experienced with them than I am.  But I do notice that:
> 
>   1) There are a lot of ML programmers who have considerable Lisp
>      experience but are happier using ML so they stay with it.

I don't believe the 'a lot with considerable experience'.

>   2) There don't seem to be nearly as many programmers with a lot
>      of experience in both languages, who are happier with Lisp.

I have not seen any numbers on this. Do you?
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <q0d4y27gp4.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <··············@ruckus.brouhaha.com>,
>  Paul Rubin <·············@NOSPAM.invalid> wrote:
>
>> Rayiner Hashem <·······@gmail.com> writes:
>> > I could just as easily say "anyway, sure, static typing has benefits,
>> > I think we all agree on that. What's not so clear is whether the
>> > benefits outweigh the costs on all that many problems".
>> 
>> I would say that if developing and using a dynamic program costs X
>> then developing and using a corresponding static program might cost AX
>> for some A that varies from problem to problem.  There is a
>> so-far-unresolved question about whether A in general tends to be
>> greater than 1 or less than 1, and that's why these battles keep
>> raging.  If A > 1 then dynamic can be said in some sense to "win" and
>> vice versa.
>> 
>> What I would say is there is good evidence that A is less than (say)
>> 100.  Any real-world application that you can do in Lisp, I can
>> probably do in ML if I spend 100x as much.  Would you dispute that?
>> 
>> What I don't think there's such good evidence for is that A is greater
>> than 0.01.  By that I mean, static languages are better at reducing
>> the likelihood of long-tail failures, to the point that if I write a
>> sufficiently paranoid static program with correctness proofs, you
>> might not be able to develop a dynamic counterpart even with 100x the
>> expenditure, if its reliability is based only on behavior tests
>> instead of proofs.  Of course the "expenditure" includes the cost of
>> cleaning up after a possible plane crash or nuclear meltdown caused by
>> a software defect.  So I think the high-assurance crowd will stay with
>> static languages, by which I would include things like Lisp code
>> certified with ACL2.
>
> Unfortunately the maintenance of static software is extremely
> costly. Changes to the software extremely costly.

Both statements are nonsense, the "extremely" especially. Let me point
you to Ada: The type system and the package interface together express
contracts between different parts of the program. Effects of changes
either stay restricted to the packages or are flagged by the
compiler. Indeed that is one of the aims for which Ada was expressly
designed: to make maintenance easier, safer and thus cheaper. 

You are fixated on the amount of work I have to do to build a new
version of the software. Unfortunately that is dwarfed by the amount of
work one has to do to ensure the continuing correctness of the
software. Type systems don't guarantee. But they have a tendency to
catch slips of keyboard as well as plainly forgotten propagation of
changes. Maintenance is _certainly_ cheaper and less error prone in a
statically typed language. I'd strongly suggest the difference between
statically and dynamically typed languages is even more pronounced in
maintenance than in development, because by its nature maintenance
work is often done (a) by people who have never seen this piece of code
before and (b) in changes that are less localized than development work.


> We are talking not only about bugs here, but mostly of changing
> requirements. Even during the development of a project (say, 1 year)
> easily 30% of the spec changes. Over the runtime
> of, say, ten years, easily 90% of the spec can be changed/added/removed.
>
> Maintenance costs in large systems is easily 90% of the
> costs of the software.

Easily. With a modular language AND one where interfaces are
documented by a sufficiently expressive type system you're better off
than without, though.

Regards -- Markus
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f941r6$5at$1@online.de>
Rainer Joswig schrieb:
> 
> Unfortunately the maintenance of static software is extremely
> costly. Changes to the software extremely costly.

The only effect that I can think of would be the need to rewrite tons of 
type annotations for local variables and such.
However, that's not really an issue for languages with type inference.

People do end up writing and maintaining type annotations on "big" 
interfaces (e.g. between a library and the rest of the system).
However, that wouldn't be much different for a dynamic language, it's 
just in the form of a comment so callers know what to submit to and 
expect from a function.

Are there other factors?

Regards,
Jo
From: David Formosa (aka ? the Platypus)
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <slrnfbci5i.219.dformosa@localhost.localdomain>
["Followup-To:" header set to comp.lang.functional.]
On 04 Aug 2007 17:42:30 -0700, Paul Rubin <http> wrote:

[...]

> I would say that if developing and using a dynamic program costs X
> then developing and using a corresponding static program might cost AX
> for some A that varies from problem to problem.

I suspect that A not just varies from problem to problem but from
programer to programer and from methodology to methodology.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <qfodhm8xbw.fsf@hod.lan.m-e-leypold.de>
Rayiner Hashem wrote:

>> Anyway, sure, dynamic typing has benefits, I think we all agree on
>> that.  What's not so clear is whether the benefits outweigh the costs
>> on all that many problems.
>
> I could just as easily say "anyway, sure, static typing has benefits,
> I think we all agree on that. What's not so clear is whether the
> benefits outweigh the costs on all that many problems". Who is right
> depends on nasty empirical observations such as how often programmers
> encounter domain invariants that can be easily expressed in existing
> type systems, how often static type systems get in the way of
> extensive refactoring, how often type errors make it past testsuites,
> etc. You can evaluate these in terms of things like the relative value
> (to the customer) of theoretical correctness versus features or
> development time, etc.
>
> Since you have admitted that theoretical arguments are futile here,
> since you can't prove your point theoretically, where is your
> experimental evidence that dynamic languages are only suitable for toy
> problems?


Actually the original claim was not "suitable only for toy problems"
but rather "(in future) we will have untyped languages for toy
problems".

It's more a question of (hypothesized) future usage, not suitability,
presumably assuming that the benefits of strong typing will be
recognized so much in future that static typing takes over the market
for engineered software. Actually I think he is right, because that
already happens: C => C++ => Java => C#: All statically typed
languages.

Regards -- Markus
From: David Formosa (aka ? the Platypus)
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <slrnfbebpk.219.dformosa@localhost.localdomain>
["Followup-To:" header set to comp.lang.functional.]
On Sun, 05 Aug 2007 05:38:43 +0200, Markus E.L. 2
<·····································@ANDTHATm-e-leypold.de> wrote: 

[...]

> It's more a question of (hypothetized) future usage, not suitability,
> presumable assuming that the benefits of strong typing will be
> recognized so much in future that static typing takes over the market
> for engineered software. Actually I think he is right, because that
> already happens: C => C++ => Java => C#: All statically typed
> languages.

Lisp => Scheme => Perl,Python => Ruby => Perl6?

All nonstatically typed languages.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xd4y0ir57.fsf@hod.lan.m-e-leypold.de>
"David Formosa (aka ? the Platypus)" wrote:

> ["Followup-To:" header set to comp.lang.functional.]
> On Sun, 05 Aug 2007 05:38:43 +0200, Markus E.L. 2
> <·····································@ANDTHATm-e-leypold.de> wrote: 
>
> [...]
>
>> It's more a question of (hypothetized) future usage, not suitability,
>> presumable assuming that the benefits of strong typing will be
>> recognized so much in future that static typing takes over the market
>> for engineered software. Actually I think he is right, because that
>> already happens: C => C++ => Java => C#: All statically typed
>> languages.
>
> Lisp => Scheme => Perl,Python => Ruby => Perl6?

You might note, that those are actually used in industry but seldom
enough in a strategy, more in the sense of "we got this (open source)
system and it's in X and we want to fix it". Systems which get
developed from scratch in the industry for deployment to paying
customers are rarely developed in those languages.

Regards -- Markus



PS: You write to c.l.l, you also get my answer to c.l.l. If you don't
    want that, be so gracious as to answer to c.l.f only.
From: David Formosa (aka ? the Platypus)
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <slrnfbhtj0.219.dformosa@localhost.localdomain>
On Mon, 06 Aug 2007 18:06:44 +0200, Markus E.L. 2
<·····································@ANDTHATm-e-leypold.de> wrote: 
>
> "David Formosa (aka ? the Platypus)" wrote:
>
>> On Sun, 05 Aug 2007 05:38:43 +0200, Markus E.L. 2
>> <·····································@ANDTHATm-e-leypold.de> wrote: 
[...]
>>> Actually I think he is right, because that
>>> already happens: C => C++ => Java => C#: All statically typed
>>> languages.
>>
>> Lisp => Scheme => Perl,Python => Ruby => Perl6?
>
> You might note, that those are actually used in industry but seldom
> enough in a strategy, mor in the sense of "we got this (open source)
> system and it's in X and we want to fiy it".

Typically the languages mentioned above are distributed in source form
(by convention or because they use some sort of immediate or JIT
compilation) which means that there is a bias towards distributing the
product as open source.  Unfortunately strategy is dictated more by
management than by programmers, and that level is often influenced more
by marketing than by the virtues of the language.

> Systems which get
> developed from scratch in the industry for deployment to paying
> customers are rarely developed in those languages.

I may be in a rare organization where we do deliver a product using
those languages to paying customers.
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9doe5$7v9$1@aioe.org>
Markus E.L. 2 escreveu:
> "David Formosa (aka ? the Platypus)" wrote:
> 
>> ["Followup-To:" header set to comp.lang.functional.]
>> On Sun, 05 Aug 2007 05:38:43 +0200, Markus E.L. 2
>> <·····································@ANDTHATm-e-leypold.de> wrote: 
>>
>> [...]
>>
>>> It's more a question of (hypothetized) future usage, not suitability,
>>> presumable assuming that the benefits of strong typing will be
>>> recognized so much in future that static typing takes over the market
>>> for engineered software. Actually I think he is right, because that
>>> already happens: C => C++ => Java => C#: All statically typed
>>> languages.
>> Lisp => Scheme => Perl,Python => Ruby => Perl6?
> 
> You might note, that those are actually used in industry but seldom
> enough in a strategy, mor in the sense of "we got this (open source)
> system and it's in X and we want to fiy it". Systems which get
> developed from scratch in the industry for deployment to paying
> customers are rarely developed in those languages.
> 
> Regards -- Markus
> 

I'm afraid that the same argument can be used with functional languages 
as well...
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <s1myx1f3zi.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Markus E.L. 2 escreveu:
>> "David Formosa (aka ? the Platypus)" wrote:
>>
>>> ["Followup-To:" header set to comp.lang.functional.]
>>> On Sun, 05 Aug 2007 05:38:43 +0200, Markus E.L. 2
>>> <·····································@ANDTHATm-e-leypold.de>
>>> wrote:
>>>
>>> [...]
>>>
>>>> It's more a question of (hypothetized) future usage, not suitability,
>>>> presumable assuming that the benefits of strong typing will be
>>>> recognized so much in future that static typing takes over the market
>>>> for engineered software. Actually I think he is right, because that
>>>> already happens: C => C++ => Java => C#: All statically typed
>>>> languages.
>>> Lisp => Scheme => Perl,Python => Ruby => Perl6?
>> You might note, that those are actually used in industry but seldom
>> enough in a strategy, mor in the sense of "we got this (open source)
>> system and it's in X and we want to fiy it". Systems which get
>> developed from scratch in the industry for deployment to paying
>> customers are rarely developed in those languages.
>> Regards -- Markus
>>
>
> I'm afraid that the same argument can be used with functional
> languages as well...

And so? That makes C++ / Java / C# untyped suddenly?

Regards -- Markus
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9gksg$7nt$1@aioe.org>
Markus E.L. 2 escreveu:
> Cesar Rabak wrote:
> 
>> Markus E.L. 2 escreveu:
>>> "David Formosa (aka ? the Platypus)" wrote:
>>>
>>>> ["Followup-To:" header set to comp.lang.functional.]
>>>> On Sun, 05 Aug 2007 05:38:43 +0200, Markus E.L. 2
>>>> <·····································@ANDTHATm-e-leypold.de>
>>>> wrote:
>>>>
>>>> [...]
>>>>
>>>>> It's more a question of (hypothetized) future usage, not suitability,
>>>>> presumable assuming that the benefits of strong typing will be
>>>>> recognized so much in future that static typing takes over the market
>>>>> for engineered software. Actually I think he is right, because that
>>>>> already happens: C => C++ => Java => C#: All statically typed
>>>>> languages.
>>>> Lisp => Scheme => Perl,Python => Ruby => Perl6?
>>> You might note, that those are actually used in industry but seldom
>>> enough in a strategy, mor in the sense of "we got this (open source)
>>> system and it's in X and we want to fiy it". Systems which get
>>> developed from scratch in the industry for deployment to paying
>>> customers are rarely developed in those languages.
>>> Regards -- Markus
>>>
>> I'm afraid that the same argument can be used with functional
>> languages as well...
> 
> And so? That makes C++ / Java / C# untyped suddenly?


Non sequitur.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <6btzr7zgny.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Markus E.L. 2 escreveu:
>> Cesar Rabak wrote:
>>
>>> Markus E.L. 2 escreveu:
>>>> "David Formosa (aka ? the Platypus)" wrote:
>>>>
>>>>> ["Followup-To:" header set to comp.lang.functional.]
>>>>> On Sun, 05 Aug 2007 05:38:43 +0200, Markus E.L. 2
>>>>> <·····································@ANDTHATm-e-leypold.de>
>>>>> wrote:
>>>>>
>>>>> [...]
>>>>>
>>>>>> It's more a question of (hypothetized) future usage, not suitability,
>>>>>> presumable assuming that the benefits of strong typing will be
>>>>>> recognized so much in future that static typing takes over the market
>>>>>> for engineered software. Actually I think he is right, because that
>>>>>> already happens: C => C++ => Java => C#: All statically typed
>>>>>> languages.
>>>>> Lisp => Scheme => Perl,Python => Ruby => Perl6?
>>>> You might note, that those are actually used in industry but seldom
>>>> enough in a strategy, mor in the sense of "we got this (open source)
>>>> system and it's in X and we want to fiy it". Systems which get
>>>> developed from scratch in the industry for deployment to paying
>>>> customers are rarely developed in those languages.
>>>> Regards -- Markus
>>>>
>>> I'm afraid that the same argument can be used with functional
>>> languages as well...
>> And so? That makes C++ / Java / C# untyped suddenly?
>
>
> Non sequitur.

You're right. Your response was a 'non sequitur'. I just thought that
perhaps I missed something.

Regards -- Markus
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186385249.987758.133870@l70g2000hse.googlegroups.com>
On Aug 4, 7:32 am, Paul Rubin <·············@NOSPAM.invalid> wrote:
> Don Geddis <····@geddis.org> writes:
> > Surely you can see how insulting your comments are to the highly skilled
> > professional programmers who are currently choosing to implement their
> > algorithms in languages like Common Lisp; who are aware of and have used
> > languages like Haskell and OCaml and ML; and who nonetheless prefer
> > developing production-quality code in a dynamically typed language?
>
> ARE there many programmers like that?  

Lispers are language junkies; I've never seen a community so dedicated
to picking up other languages like them. After Basic, Pascal, C/C++,
C#, PHP, OCaml, SQL, Delphi, and a little Ruby and Python I thought I
was a flip-flop freak; then, after hanging around for a prolonged time
with Lispers, I found that I had actually discovered Lisp quite early.
Just read some of the entries of the Road to Lisp survey at
http://wiki.alu.org/The_Road_to_Lisp_Survey or, if you lack time,
Kenny's Top ten at http://wiki.alu.org/Kenny's_RtLS_Top-Ten
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186227984.903447.242940@57g2000hsv.googlegroups.com>
On 4 Aug., 06:06, Don Geddis <····@geddis.org> wrote:
> Ingo Menger <···········@consultant.com> wrote on Fri, 03 Aug 2007:
>
> > My personal opinion here is that we will have untyped languages for toy
> > scripts, as it used to be. And for the rest, where dollars or human lifes
> > count, we'll have languages with even more advanced type systems.
>
> Presumably, you mean to include dynamically typed languages like Common Lisp
> under your "untyped" category, and compile-time statically typed languages
> as the precursors to these "even more advanced type systems".

Sure.


> Surely you can see how insulting your comments are to the highly skilled
> professional programmers who are currently choosing to implement their
> algorithms in languages like Common Lisp;

No, I can't see how an opinion (explicitly marked as such) about how
future developments will evolve can possibly be insulting.

> Perhaps you'll
> begin to see that programming language design involves tradeoffs, and that
> your cherished compile-time static typing is not necessarily a Universal
> Good.

Straw men, and, since you address me here, I have posted half a dozen
comments here where I admit that current type systems have their
limits and are far from being "Universal Good". I repeat it here
since, admittedly, the thread is quite long.


> > The time is not so far away when we will regard an
> > ArrayIndexOutOfBoundsException a typing error, just as we today may regard
> > a NullPointerException a typing error
>
> Really.

Yes.

> Consider this program (in pseudocode):
>
>         define array A [1..78556];
>         set i = compute_smallest_sierpinski_number();
>         set A[i] = 10;
>
> It may help you to reference
>        http://en.wikipedia.org/wiki/Sierpinski_number
> In short, the smallest Sierpinski number is probably 78557, but there
> remains a chance that there is a smaller one.  The answer is deterministic,
> computable, the algorithm is known ... but the answer is not yet known by
> any human being.
>
> Good luck inventing a compile-time static type system that will label the
> final line of code as "ArrayIndexOutOfBounds",

That's too easy, y'know. :)
The compiler would simply flag any array access. Voilà. Runtime error
caught at compile time.

Seriously again: The problem is not to find the bad array accesses,
but the good ones, so to speak. The obligation is on you, the designer-
programmer, to prove that the access is good. Of course, the language
has to give you the tools to do that.
That this doesn't mean it must be complicated for the programmer is
demonstrated by the success of the HM type system in various
languages. A stronger type system does not necessarily mean you have
to write more type declarative code.
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87sl6z5eq3.fsf@geddis.org>
Ingo Menger <···········@consultant.com> wrote on Sat, 04 Aug 2007:
> On 4 Aug., 06:06, Don Geddis <····@geddis.org> wrote:
>> Ingo Menger <···········@consultant.com> wrote on Fri, 03 Aug 2007:
>> > My personal opinion here is that we will have untyped languages for toy
>> > scripts, as it used to be. And for the rest, where dollars or human lifes
>> > count, we'll have languages with even more advanced type systems.
>>
>> Surely you can see how insulting your comments are to the highly skilled
>> professional programmers who are currently choosing to implement their
>> algorithms in languages like Common Lisp;
>
> No, I can't see how a opinion (explicitly marked as such) about how
> future devolpments will evolve can possibly be insulting.

Hmm.  So you're honestly confused?  Then let me help you out.

In your original quote, which prompted my response, you expressed a "personal
opinion" that untyped languages (like Common Lisp) would eventually find
their natural niche, useful only for "toy scripts".  Obviously, anyone doing
Real Work would use a "more advanced type system", the early versions of
which are already available in Haskell, OCaml, etc.  That's your "personal
opinion", right?  That dynamic languages shouldn't be used for serious work.

As it turns out, there are already highly experienced programmers that
currently choose to write production-quality code in dynamic languages (like
Common Lisp).  They make these choices consciously, already aware of the
tradeoffs between static type checking and dynamic type checking.

Surely your personal opinion is that these folks are idiots, regardless of
their background and experience.  Dynamic languages are only useful for toy
problems, right?  If a programmer chooses to use one for a Real Problem, that
programmer must necessarily be ignorant, right?  There is no possible reason
an educated programmer would make such a choice.  Right?

My own Personal Opinion is that you yourself are arrogant and uneducated about
the value of dynamically typed programming languages.  But please, don't take
offense.  My comment can't possibly be insulting, because I clearly labelled
it as my "personal opinion".

>> Perhaps you'll begin to see that programming language design involves
>> tradeoffs, and that your cherished compile-time static typing is not
>> necessarily a Universal Good.
>
> Straw men, and, since you adress me here, I have posted half a dozen
> comments here where I admint that current type systems have their limits
> and are far from being "Universal Good". I repeat it here since,
> admittedly, the thread is quite long.

But you have incorrectly assumed that my comment was only directed toward
today's not-quite-perfect implementation.  In fact, my comment was written
exactly as I intended: that NO compile-time static type system, even in
theory, comes with zero drawbacks and is in all ways superior to a dynamic
type system.

Your focus on a hypothetical perfect future static type system blinds you to
the tradeoffs that need to be made for any decision in programming language
design.

>>         define array A [1..78556];
>>         set i = compute_smallest_sierpinski_number();
>>         set A[i] = 10;
>> Good luck inventing a compile-time static type system that will label the
>> final line of code as "ArrayIndexOutOfBounds",
>
> That's too easy, ye'know. :) The compiler would simply flag any array
> access. Voilà. Runtime error caught at compile time.  Seriously again: The
> problem is not to find the bad array accesses, but the good ones, so to
> speak. The obligation is on you, the designer- programmer, to prove that
> the access is good.

So try to follow your own logic through.  You're right at the border of a
great insight, but seem unable to cross the fence on your own.

Any static type checking system is going to try to do some inference at
compile time, to logically conclude that some errors would necessarily happen
at run time.  There are two reasons why this task is impossible in general:
1. Some data only appears at run time;
2. Inference is undecidable.

Therefore, the truth of any such system, even in theory, even in the future,
is that all programs will be classified into three categories, not just two:
1. The type system can prove at compile time that the program will not
   generate a runtime error;
2. The type system can prove at compile time that the program WILL generate
   a runtime error;
3. The type system can't be sure whether or not the program will generate
   a runtime error.

Everyone knows that programs in category 1 are great, and the type system has
been very helpful for any programmer who can be given such assurances.

Everyone knows that programs in category 2 probably ought to be fixed right
away (although even this is sometimes argued by the dynamic folks).

But the real question is about programs in category 3.  Dynamic typing fans
WANT TO RUN THOSE PROGRAMS ANYWAY, and see what happens at runtime.  Static
typing fans almost universally PROHIBIT the compiler from finishing programs
in category 3, and statically typed languages almost universally prevent such
programs from running.  Static typing fans incorrectly assert that there are
no interesting programs in category 3, and so the language's prohibition on
running them should not negatively impact a real programmer.

Do you begin to see how YOUR OWN SUGGESTION, that

        The obligation is on you, the designer- programmer, to prove that
        the access is good.

is a possible negative in the space of programming language design?  That,
whatever the virtues of static typing systems, you have committed to a
requirement that not every programmer (even very experienced ones) might
see as having zero cost?

Do you begin to understand why programming language design involves
tradeoffs, and why, even in the future, there may be a place for dynamically
typed languages?

> That this doesn't mean it must be complicated for the programmer is
> demonstrated by the success of the HM type system in various languages. A
> stronger type system does not necessarily mean you have to write more type
> declarative code.

It boggles my mind how you are unable to see the contradiction in your own
examples.  I gave you a piece of (pseudo)code that a language with dynamic
typing could compile and execute as is.  You agree that a static typing
system would reject the program and refused to compile it; you say that it is
the programmer's responsibility to give the compiler enough information to
prove that the program is type safe; and now at the same time you're going to
try to claim that the programmer doesn't have to write more type declarative
code?

Really, I don't even know why I'm arguing with you.  You seem to be arguing
with yourself just as well.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
Is the glass half empty, half full, or twice as large as it needs to be?
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x643vqd50.fsf@ruckus.brouhaha.com>
Don Geddis <···@geddis.org> writes:
> It boggles my mind how you are unable to see the contradiction in
> your own examples.  I gave you a piece of (pseudo)code that a
> language with dynamic typing could compile and execute as is.  You
> agree that a static typing system would reject the program and
> refused to compile it; you say that it is the programmer's
> responsibility to give the compiler enough information to prove that
> the program is type safe; and now at the same time you're going to
> try to claim that the programmer doesn't have to write more type
> declarative code?

I don't see the problem in the example you gave.  As Ingo says (if I
understand him), the language could simply refuse to allow A to be an
array in that code.  You'd use something like a dictionary object
instead, that can take arbitrary keys, and that gets rid of the
possibility of an index-out-of-range error without needing additional
type declarations.  
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <877ioa6had.fsf@geddis.org>
> Don Geddis <···@geddis.org> writes:
>> It boggles my mind how you are unable to see the contradiction in
>> your own examples.  I gave you a piece of (pseudo)code that a
>> language with dynamic typing could compile and execute as is.  You
>> agree that a static typing system would reject the program and
>> refused to compile it; you say that it is the programmer's
>> responsibility to give the compiler enough information to prove that
>> the program is type safe; and now at the same time you're going to
>> try to claim that the programmer doesn't have to write more type
>> declarative code?

Paul Rubin <·············@NOSPAM.invalid> wrote on 04 Aug 2007 13:0:
> I don't see the problem in the example you gave.  As Ingo says (if I
> understand him), the language could simply refuse to allow A to be an
> array in that code.

But that's the code the programmer gave.  Declare A to be an array, of a
certain size.

I agree that the language can simply say "I am unable to guarantee that you
will not have a type error at runtime, therefore I refuse to run the code
as is; you must change the code before I run it."

The point, as you keep missing, is that a dynamically typed language may
be able to happily run the code exactly as written.  And, it may happen that
the code does not, in fact, run into a runtime type error when so executed.

Paul Rubin <·············@NOSPAM.invalid> wrote on 04 Aug 2007 13:0:
> You'd use something like a dictionary object instead, that can take
> arbitrary keys, and that gets rid of the possibility of an
> index-out-of-range error without needing additional type declarations.

You can do all sorts of things to change the code, and make it acceptable to
a static type system.  One option is that you could provide additional type
declarations, which allow the static type checker to complete its proofs.
Another option, as you suggest, is to change the data structure, again so that
the static type checker is able to complete its proofs.

Yet another option is to use a dynamically typed language, not change the code
at all, and successfully execute it as is.

You don't seem able to give the simple acknowledgement that (mandatory)
static type checkers provide a programmer with costs, as well as benefits.
Not just benefits.

Rayiner Hashem <·······@gmail.com> writes:
> What if the appropriate data structure, in the Donald Knuth sense, for
> that problem is an array?

Paul Rubin <·············@NOSPAM.invalid> wrote on 04 Aug 2007 13:0:
> If an array cannot meet the requirements of the problem then by definition
> it is not the appropriate data structure.

Ah, but now you're trying to slip out of the trap with mere arrogant
proclamations.

I carefully set up the original problem so that it is indeed possible that
the array does perfectly meet the requirements of the problem.  The ONLY
problem is that the static type checker (and in fact, probably any future
static type checker) is unable to PROVE that it meets the requirements of the
problem.

But I'm not going to let you get away with the clearly false and arrogant
assumption that a piece of code is inappropriate BECAUSE the static type
checker is unable to prove it correct.

That case, in fact, is the exact source of conflict between fans of dynamic
type checking vs. fans of static type checking.  Namely, the potential value
of (successfully!) executing code that is not able to be proven type safe by
static type checkers.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
Blow ye winds, like the trumpet blows; but without that noise.
	-- Deep Thoughts, by Jack Handey
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <8lvebsk8xt.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:

> You don't seem able to give the simple acknowledgement that (mandatory)
> static type checkers provide a programmer with costs, as well as benefits.
> Not just benefits.

Not quite. I think we're unable to give in to critique on claims of
what static type systems can do and are good that you, not us, put
forth. You're consistently confusing type systems, program
verification and static checking. It's difficult to acknowledge anything
on that basis. Certainly _static typing_ has also cost. To the
programmer who has learned his language the costs are negligible, the
benefits are vast (but of course don't cover all problems encountered
in programming). 

What you're trying is to account the costs of other programming
methods and tools to static typing and showing that static typing
cannot deliver on the promise of those other methods, thus (a)
increasing the cost and (b) denying the benefits.

This would be dishonest if done deliberately, but I put it down to your
(and other peoples) lack of knowledge what static typing is and how it
is used. But please do me a favour: learn at least one
statically typed language up to a certain point [1] and/or read
up on the relevant literature on static typing before you continue this
discussion so vocally from completely wrong premises.

BTW: I'd be really interested on the language experience of some of
the static typing opponents in this thread. This is a question that
never got answered (when asked by MB from RJ), but it would help
enormously if we knew that we actually have a tutorial session on
static typing now, instead of a scientific or political debate. Not
that I think that it is my or anyone elses duty to conduct tutorials
on usenet: We might reach the point where we point to external
resources quite early.

Regards -- Markus
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186258788.174476.204750@m37g2000prh.googlegroups.com>
> I don't see the problem in the example you gave.  As Ingo says (if I
> understand him), the language could simply refuse to allow A to be an
> array in that code.  You'd use something like a dictionary object
> instead, that can take arbitrary keys, and that gets rid of the
> possibility of an index-out-of-range error without needing additional
> type declarations.  

What if the appropriate data structure, in the Donald Knuth sense, for
that problem is an array?
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xvebvots1.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
> What if the appropriate data structure, in the Donald Knuth sense, for
> that problem is an array?

If an array cannot meet the requirements of the problem then by
definition it is not the appropriate data structure.
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186266374.790324.173840@q75g2000hsh.googlegroups.com>
> If an array cannot meet the requirements of the problem then by
> definition it is not the appropriate data structure.

There is a difference between the problem, and the model of the
problem as expressed in accordance with a type system. If the model of
the problem won't admit a data structure appropriate* to the problem,
because the type system can't express it, then the type system is
broken, not the program.

*) Where appropriateness is defined in terms of real-world criteria,
like ease of use, algorithmic complexity, and cache footprint.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x1weiucm6.fsf@ruckus.brouhaha.com>
Rayiner Hashem <·······@gmail.com> writes:
> There is a difference between the problem, and the model of the
> problem as expressed in accordance with a type system. If the model of
> the problem won't admit a data structure appropriate* to the problem,
> because the type system can't express it, then the type system is
> broken, not the program.

Programmers don't always get to choose the specifications.  If the
specification says some function is not allowed to throw subscript
exceptions, the programmer has to find a way to make sure that the
function doesn't throw subscript exceptions.  If that can't be done
with an array (by proving that the subscripts are always in range),
then an array is not the appropriate data structure.
From: David Formosa (aka ? the Platypus)
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <slrnfb9vpb.219.dformosa@localhost.localdomain>
["Followup-To:" header set to comp.lang.functional.]
On 04 Aug 2007 14:48:46 -0700, Paul Rubin <http> wrote:
> Rayiner Hashem <·······@gmail.com> writes:
>> What if the appropriate data structure, in the Donald Knuth sense, for
>> that problem is an array?
>
> If an array cannot meet the requirements of the problem then by
> definition it is not the appropriate data structure.

The requirements of my problem are,
    The data structure has O(1) access time.
    The data structure has a defined order
    The data structure is indexed numerically
    The data structure can store a value at the smallest Sierpinski number.
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186517383.134040.287530@g12g2000prg.googlegroups.com>
On Aug 4, 1:05 pm, Paul Rubin <·············@NOSPAM.invalid> wrote:
> I don't see the problem in the example you gave.  As Ingo says (if I
> understand him), the language could simply refuse to allow A to be an
> array in that code.  You'd use something like a dictionary object
> instead, that can take arbitrary keys, and that gets rid of the
> possibility of an index-out-of-range error without needing additional
> type declarations.

That's not terribly useful if the data associated with the index
matters.  The data that is returned for a "missing" index can be of
the appropriate type yet still cause the program to produce the wrong
results.

The goal isn't to not signal errors, it is to produce the right
results.


 
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186393008.944703.96430@b79g2000hse.googlegroups.com>
On 4 Aug., 20:36, Don Geddis <····@geddis.org> wrote:
> Ingo Menger <···········@consultant.com> wrote on Sat, 04 Aug 2007:

> In your original quote, which prompted my response, you expressed a "personal
> opinion" that untyped languages (like Common Lisp) would eventually find
> their natural niche, useful only for "toy scripts".  Obviously, anyone doing
> Real Work would use a "more advanced type system", the early versions of
> which are already available in Haskell, OCaml, etc.  That's your "personal
> opinion", right?  That dynamic languages shouldn't be used for serious work.

s/should not/will not/
Markus has already pointed out that historically, one trend is towards
ever stronger type systems. On the other side, we see also
developments in the untyped field like python and ruby, replacing the
older perl and PHP gradually.

> As it turns out, there are already highly experienced programmers that
> currently choose to write production-quality code in dynamic languages (like
> Common Lisp).  They make these choices consciously,

Really.

> already aware of the
> tradeoffs between static type checking and dynamic type checking.

I don't share this impression. When it is stated here that "I do unit
tests, they'll catch type errors like a type system does" it is
obvious that not even the fundamental epistemological difference
between "test" and "proof" is clear.


> Surely your personal opinion is that these folks are idiots,

No.
If you absolutely want to draw this conclusion, then please don't
blame it on me.

> Dynamic languages are only useful for toy
> problems, right?

s/are/will be/

> If a programmer chooses to use one for a Real Problem, that
> programmer must necessarily be ignorant, right?  There is no possible reason
> an educated programmer would make such a choice.  Right?

Wrong. Please come down.


> My own Personal Opinion is that you yourself are arrogant and uneducated about
> the value of dynamically typed programming languages.

Arrogant? Maybe. Uneducated? Not at all.

> But please, don't take offense.

Are you kidding? Who, do you think, are you, that you think you can
offend me with such childish babble?

> My comment can't possibly be insulting, because I clearly labelled
> it as my "personal opinion".

This is okay.


> Any static type checking system is going to try to do some inference at
> compile time, to logically conclude that some errors would necessarily happen
> at run time.  There are two reasons why this task is impossible in general:
> 1. Some data only appears at run time;

This argument does not hold water.
Surely, data appears at run time. Since we don't know the data at
compile time, data have to be checked for correctness anyway at
runtime. Thus, the type of input data will always be something like
  Maybe MyData
  Either Error MyData
The rest is a matter of well understood parser technology.

> 2. Inference is undecidable.

This statement is meaningless without reference to a type system. What
you mean is "Type inference in type system X is undecidable."
The undecidability of certain type systems with regard to type
inference does not mean it's useless. When type inference is
undecidable, type checking often is not. In practice, that means the
programmer has to supply a few hints to the type checker in the form
of type annotations. This is how it works in the Glasgow Haskell
Compiler that extends the traditional HM type system with
(undecidable) higher rank polymorphism.


> Therefore, the truth of any such system, even in theory, even in the future,
> is that all programs will be classified into three categories, not just two:
> 1. The type system can prove at compile time that the program will not
>    generate a runtime error;
> 2. The type system can prove at compile time that the program WILL generate
>    a runtime error;

This statement reveals that you are in fact ignorant of how strongly
typed languages work. That set of programs simply does not exist - no
more than the set of programs with syntax errors.

> 3. The type system can't be sure whether or not the program will generate
>    a runtime error.
>
> Everyone knows that programs in category 1 are great, and the type system has
> been very helpful for any programmer who can be given such assurances.
>
> Everyone knows that programs in category 2 probably ought to be fixed right
> away (although even this is sometimes argued by the dynamic folks).
>
> But the real question is about programs in category 3.

There are (at least) 3 subcategories:
3.a The type system is not powerful enough to express the types
needed.
3.b The type of the program cannot be told at all or is meaningless,
as in the case of EVAL.
3.c. The programmer does not really care about types, abstractions,
etc., he instead prefers a method of writing down something, watching
traces and patching bugs until the program nearly does what he wants.

The case 3.a is the one I am interested in, not as researcher, but as
future user (programmer).
3.b is more difficult. The problem with EVAL is, of course, that
anything might happen.
Interestingly, the following would be possible, at least, in a
strongly typed language: One must use EVAL only when it is clear of
what type the resulting expression shall be, i.e.
  print (eval x)
would be wrong (just like show.read is currently in Haskell) but
  print ((eval x)::Int)
would be ok. More likely, though, would be:
  case ((eval x)::Maybe Int) of
     Nothing -> print "wrong number"
     Just i -> print i
Eval could then parse the string, typecheck the code and check for the
correct type, then it would run the code only if the type was ok.
(certain technical difficulties like substitution of local variables,
etc. had to be adressed, of course)
And, in this case, we could also be sure the injected code did not
remove some files, fired a rocket somewhere or caused the reactor to
melt, since such code surely would have type (IO Int) (in Haskell,
that is).

> Dynamic typing fans
> WANT TO RUN THOSE PROGRAMS ANYWAY, and see what happens at runtime.

Yes, I see. If one were malicious, one could understand that as
"Dynamic typing fans don't really know what they are doing.", which
would cover case 3.c

> Static
> typing fans almost universally PROHIBIT the compiler from finishing programs
> in category 3, and statically typed languages almost universally prevent such
> programs from running.

Thanks god (for case 3.c)

> Static typing fans incorrectly assert that there are
> no interesting programs in category 3,

This is simply not true. Research in type systems is going on just
because people know that there are programs in category 3.a
Yet, I at least think that EVAL is evil and ought to be banned from
the vocabulary when it is not yet another website, yet another
blogging software, i.e. when it's more than a toy program. And, of
course, I disregard 3.c altogether.

> Do you begin to see how YOUR OWN SUGGESTION, that
>
>         The obligation is on you, the designer- programmer, to prove that
>         the access is good.
>
> is a possible negative in the space of programming language design?

Not at all. In the example given, you don't know what the smallest
Sierpinsky number is, yet you index an array with it! In other words:
you don't know what you are doing. And indeed, I prefer the heart-lung
machine that maybe one day is saving my life be programmed by people
that know what they are doing.

>  That,
> whatever the virtues of static typing systems, you have committed to a
> requirement that not every programmer (even very experienced ones) might
> see as having zero cost?

Who said it had zero cost?

> Do you begin to understand why programming language design involves
> tradeoffs, and why, even in the future, there may be a place for dynamically
> typed languages?

I didn't say there wasn't, you know.

>
> > That this doesn't mean it must be complicated for the programmer is
> > demonstrated by the success of the HM type system in various languages. A
> > stronger type system does not necessarily mean you have to write more type
> > declarative code.
>
> It boggles my mind how you are unable to see the contradiction in your own
> examples.  I gave you a piece of (pseudo)code that a language with dynamic
> typing could compile and execute as is.  You agree that a static typing
> system would reject the program and refused to compile it; you say that it is
> the programmer's responsibility to give the compiler enough information to
> prove that the program is type safe; and now at the same time you're going to
> try to claim that the programmer doesn't have to write more type declarative
> code?

It's not my fault when the facts are telling so.
SML or Haskell do have a type system incomparably stronger and more
powerful than that of PASCAL, for example. And yet there is less type
declarative code required in SML and Haskell. In most cases, no type
declarative code is required at all!

You see, the claim "more powerful type systems will require more type
declarative code" is definitely false. Therefore, by the law of
excluded middle, the opposite must be true.
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87tzrcxzw9.fsf@geddis.org>
Ingo Menger <···········@consultant.com> wrote on Mon, 06 Aug 2007:
> When it is stated here that "I do unit tests, they'll catch type errors
> like a type system does" it is obvious that not even the fundamental
> epistemological difference between "test" and "proof" is clear.

There is overlap, but unit tests actually check some things (like program
correctness) that static typing doesn't check; meanwhile, as you say,
static typing can potentially find some problems that unit tests might miss.

The real question is one of utility.  How often does static typing find a
problem that is not found by other approaches (such as unit tests); how
onerous are the restrictions that static typing requires of programmers, etc.

All I'm asking is that you understand that there is a tradeoff, and while
static typing may sometimes buy you a benefit, it also imposes a cost.  And
it's far from clear that the cost is worth the benefit.

>> Dynamic languages are only useful for toy problems, right?
>
> s/are/will be/

You say this, apparently without understanding the costs imposed by static
typing, and the benefits derived by dynamic typing.  Your only understanding
seems to be about the benefits of static typing.  We all admit there are some
benefits.  That isn't the issue.

> The undecidability of certain type systems with regard to type inference
> does not mean it's useless.

Of course not.  What it means is that there may be some useful programs
that the static type checker cannot prove to be type safe.

Dynamic typing fans want to run these programs anyway.  Static typing fans
want to force the programmer to annotate the program so that the checker is
able to complete its proofs.

> When type inference is undecidable, type checking often is not. In
> practice, that means the programmer has to supply a few hints to the type
> checker in the form of type annotations. This is how it works in the
> Glasgow Haskell Compiler that extends the traditional HM type system with
> (undecidable) higher rank polymorphism.

Requiring the hints imposes a cost on the programmer, for what may be dubious
benefits.  Besides that, it is not always possible to solve the problem via
the use of hints (e.g. with EVAL).

> This statement reveals that you are in fact ignorant of how strongly
> typed languages work. That set of programs simply does not exist - no
> more than the set of programs with syntax errors.

You're just arbitrarily defining the only valid programs to be those that
pass the static type checker.  You need to widen your horizon a bit, and look
at the whole world of computation and programming languages.

Program source code is a description, from a human being to a computer, of how
to do a computation.  We can formalize a lot of this, assign semantics to an
overall program by decomposition and looking at the semantics of the smaller
parts, etc.

At the end of the day, the question is whether a computer could execute the
program and produce a result.  That is "useful".  That is "success".

Take your example program with a syntax error somewhere.  What if that syntax
error was in the middle of unreachable code?  It appears in the source, but
no execution of the program will ever reach it.  Why must such a program "not
exist" or "have no semantics"?  Only because you are arbitrarily choosing to
define it so.  Some of us can see a bigger picture here.

>> 3. The type system can't be sure whether or not the program will generate
>>    a runtime error.
>
> There are (at least) 3 subcategories:
> 3.a The type system is not powerful enough to express the types
> needed.
> 3.b The type of the program cannot be told at all or is meaningless,
> as in the case of EVAL.
> 3.c. The programmer does not really care about types, abstractions,
> etc., he instead prefers a method of writing down something, watching
> traces and patching bugs until the program nearly does what he wants.

You're missing plenty more cases.  In particular, the one I keep mentioning
is where the type system IS "powerful enough to express the types needed",
but inference with that powerful type system is undecidable, so the checker
is unable to be sure whether or not the program is type safe.

And, of course, there's always the EVAL case.

> 3.b is more difficult. The problem with EVAL is, of course, that
> anything might happen.
> Interestingly, the following would be possible, at least, in a
> strongly typed language: One must use EVAL only when it is clear of
> what type the resulting expression shall be, i.e.
>   print (eval x)
> would be wrong (just like show.read is currently in Haskell) but
>   print ((eval x)::Int)
> would be ok.

You're attempting to solve YOUR problem (of static type checking) by limiting
MY abilities as a programmer.  "One must use EVAL only when ..."

Surely you can see that an easy alternate response is that EVAL is very
useful, and if I have to make a choice I'd rather give up the static type
checker than give up EVAL?

>> Dynamic typing fans WANT TO RUN THOSE PROGRAMS ANYWAY, and see what
>> happens at runtime.
>
> Yes, I see. If one were malicious, one could understand that as
> "Dynamic typing fans don't really know what they are doing.", which
> would cover case 3.c

You are indeed malicious.  If you were honest, you would see that there are
many different kinds of programs in category 3, and static typing has problems
with all of them.  Wanting to run such a program is NOT evidence that a
programmer "doesn't really know what he is doing", as you claim.  That claim
only shows that you don't understand the limitations of static typing.

> Yet, I at least think that EVAL is evil and ought to be banned from
> the vocabulary when it is not yet another website, yet another
> blogging software, i.e. when it's more than a toy program.

Well, at last you come out with it.

So, we now know that there is a feature of some programming languages, which
many mature, experienced programmers find valuable, that you personally think
is "evil" and "ought to be banned".  So of course you're not concerned when
your favorite static typing system can't accommodate it!

Can't you at least try for a little empathy?  Try to imagine that these
experienced programmers might actually have found a little value in EVAL, and
that they naturally might chafe at the imposition of a new system (static
typing) which eliminates this useful feature?

That static typing might not always be a win, but might in addition impose
some costs as well on the programmer?  That it might be hard to tell whether
the costs are worth the benefits, in general?

>> Do you begin to understand why programming language design involves
>> tradeoffs, and why, even in the future, there may be a place for
>> dynamically typed languages?
>
> I didn't say there wasn't, you know.

Well, yes, you allowed dynamically typed languages to be used for "toy
scripts", that's true.  But, in your future, there's no place for them in any
serious programming work.

>> You agree that a static typing system would reject the program and refused
>> to compile it; you say that it is the programmer's responsibility to give
>> the compiler enough information to prove that the program is type safe;
>> and now at the same time you're going to try to claim that the programmer
>> doesn't have to write more type declarative code?
>
> It's not my fault when the facts are telling so.  SML or Haskell do have a
> type system incomparably stronger and more powerful than that of PASCAL,
> for example. And yet there is less type declarative code required in SML
> and Haskell. In most cases, no type declarative code is required at all!
>
> You see, the claim "more powerful type systems will require more type
> declarative code" is definitely false. Therefore, by the law of excluded
> middle, the opposite must be true.

We're not talking about comparing with Pascal and C.  Come on, get real.

Yes, of course SML/Haskell has a stronger/more powerful type system than
Pascal/C, while at the same time requires less declarative code.  That's
not a source of debate.

The interesting comparison, about writing "more type declarative code", is
with dynamic typing languages like Lisp or Perl or Python or Ruby.

Those are languages that would happily execute code (perhaps to the
programmer's benefit) which languages with static type checkers are forced
to reject.  Unless the programmer adds additional type declarative code.

I'm just trying to get you to see that there is a cost to requiring static
type checking.  And that there is an alternative programming language design
(dynamic type checking) which doesn't impose this cost.  And that it's up to
you to show that the benefits gained are worth the cost imposed (if you think
that EVERY serious programmer should use static typing checking for ANY
serious software development).

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
Patriotism is your conviction that this country is superior to all others
because you were born in it.  -- George Bernard Shaw
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186471719.022093.202120@r34g2000hsd.googlegroups.com>
On 6 Aug., 20:48, Don Geddis <····@geddis.org> wrote:
> Ingo Menger <···········@consultant.com> wrote on Mon, 06 Aug 2007:

> The real question is one of utility.  How often does static typing find a
> problem that is not found by other approaches (such as unit tests);

This is not the question.
The question is: when do you find the problem? In an early development
phase, or 5 years after the program was in production.

> how
> onerous are the restrictions that static typing requires of programmers

They are not onerous.

> > The undecidability of certain type systems with regard to type inference
> > does not mean it's useless.
>
> Of course not.  What it means is that there may be some useful programs
> that the static type checker cannot prove to be type safe.

Yes, but so what? A few type annotations here and there, and that was
that. Type annotations, to be sure, that are most often supplied
anyway, even when not strictly needed, as a form of high quality
comment.

> > When type inference is undecidable, type checking often is not. In
> > practice, that means the programmer has to supply a few hints to the type
> > checker in the form of type annotations. This is how it works in the
> > Glasgow Haskell Compiler that extends the traditional HM type system with
> > (undecidable) higher rank polymorphism.
>
> Requiring the hints imposes a cost on the programmer, for what may be dubious
> benefits.  

This is not true.
Do you ever write comments in your programs? Despite the cost this
imposes?

> Besides that, it is not always possible to solve the problem via
> the use of hints (e.g. with EVAL).

There is no dispute that EVAL is not typeable, as it stands.
EVAL seems also be the reason why you and others here that constantly
provide "examples of what can't be done in strongly typed
languages" (those examples contain EVAL, of course) and also those
that claim that LISP has "type checking", albeit a dynamic one, cannot
differentiate between compile time type checking and runtime type
checking. I also have the impression that many LISP programmers can't
see that dynamic type checking at runtime is fundamentally different
from compile time (static) type checking.


> > This statement reveals that you are in fact ignorant of how strongly
> > typed languages work. That set of programs simply does not exist - no
> > more than the set of programs with syntax errors.
>
> You're just arbitrarily defining the only valid programs to be those that
> pass the static type checker.

No, it's not me, it's the specifications of the languages in question.
There's also a reason why this has to be so, you can read about it,
for example in "Types and Programming Languages" or other well known
books.

> You need to widen your horizon a bit, and look
> at the whole world of computation and programming languages.

I know that there are programming languages that don't care about a
formal semantics. That's precisely why I made the statement that you
found to be offending in the first place.


> Take your example program with a syntax error somewhere.  What if that syntax
> error was in the middle of unreachable code?  It appears in the source, but
> no execution of the program will ever reach it.

This is nonsense. Take for example:
  if a > b then print "foo" elsaa print ) "bar"
How is print ) "bar" unreachable code here?
When you say "unreachable code", you presuppose already that the
source code is a sentence of the language in question. Syntax analysis
is the process that recognizes sentences of a language, and rejects
non-sentences. Requiring non-sentences to have nevertheless semantics
in the language is just nonsensical. It is like requiring that the
sentence "LISP Programmierer sind schon seltsame Leute e anche sono
gente stupidi qui fa male" has any meaning in english or even russian
language.

> Why must such a program "not
> exist" or "have no semantics"?

If you don't know that, I suspect you are not in a position to speak
for "sophisticated programmers" and "highly skilled developers".

> Only because you are arbitrarily choosing to
> define it so.

No, not *arbitrarily*. There's a lot of math/theory involved here, but
some LISP  programmers do not seem to care. They type their parens and
look what "happens when I run it".

> You're missing plenty more cases.  In particular, the one I keep mentioning
> is where the type system IS "powerful enough to express the types needed",
> but inference with that powerful type system is undecidable, so the checker
> is unable to be sure whether or not the program is type safe.

I repeat that it is not an issue at all. If your opinion is that 3 or
so type annotations in 100 lines of code are unbearable costs, well,
that's not my fault.

> Surely you can see that an easy alternate response is that EVAL is very
> useful, and if I have to make a choice I'd rather give up the static type
> checker than give up EVAL?

Believe me, I have used lots of EVAL when I used to be a perl
programmer and have seen even more code where it was used. And that's
the reason I estimate that 80% of them are not really needed.


> > Yet, I at least think that EVAL is evil and ought to be banned from
> > the vocabulary when it is not yet another website, yet another
> > blogging software, i.e. when it's more than a toy program.
>
> Well, at last you come out with it.
>
> So, we now know that there is a feature of some programming languages, which
> many mature, experienced programmers find valuable, that you personally think
> is "evil" and "ought to be banned".  So of course you're not concerned when
> your favorite static typing system can't accommodate it!

No type system can ever accommodate it, as it stands.
Which means, if it's really needed (and not just comfortable to use)
then, indeed, you can't use a typed language.


> Can't you at least try for a little empathy?  Try to imagine that these
> experienced programmers might actually have found a little value in EVAL, and
> that they naturally might chafe at the imposition of a new system (static
> typing) which eliminates this useful feature?

As I said before, I don't have to imagine, I just remember my perl
days.

> The interesting comparison, about writing "more type declarative code", is
> with dynamic typing languages like Lisp or Perl or Python or Ruby.

Ok, but then, to be fair, we had to count in all lines that deal with
"dynamic typing". If there aren't any, we can't compare at all, cause
we have very different programs.

> And that it's up to
> you to show that the benefits gained are worth the cost imposed (if you think
> that EVERY serious programmer should use static typing checking for ANY
> serious software development).

No, it's not up to me. You'll have to weigh your costs against your
benefits yourself.
All I can do is to say: Well, there IS a safety belt, though it can't
prevent any accident per se, it can save your life or at least your
face looks much better after an accident. Therefore, I strongly
suggest to wear it.
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87ir7qvrjf.fsf@geddis.org>
Ingo Menger <···········@consultant.com> wrote on Tue, 07 Aug 2007:
>> > This statement reveals that you are in fact ignorant of how strongly
>> > typed languages work. That set of programs simply does not exist - no
>> > more than the set of programs with syntax errors.
>>
>> You're just arbitrarily defining the only valid programs to be those that
>> pass the static type checker.
>
> No, it's not me, it's the specifications of the languages in question.

We're attempting here to have a larger discussion.  Not about what any
particular language does, but rather from the perspective of a designer of
a new language.  At some point the designer faces a choice: should the
language have a static type checker, or not?

It is simply false that the only possible valid programs, in some
hypothetical new programming language, are those that can be typed by a
static type checker.  It's trivial to construct examples of programs in
pseudocode that can be assigned useful semantics, but cannot be type checked
by static type checkers.

> I know that there are programming languages that don't care about a formal
> semantics. That's precisely why I made the statement that you found to be
> offending in the first place.

And again, you are confusing a rejection of static type checkers with a
rejection of semantics.  There is more to the topic of programming language
semantics than just what can be proven by a static type checker.

In fact, this is probably the core debate on this whole thread: that the fans
of static type checking can't even conceive of useful programs which are not
typeable by their static checkers.

But the dynamic typing fans can conceive of such programs, and some of them
use such programs regularly.

> Syntax analysis is the process that recognizes sentences of a language, and
> rejects non-sentences. Requiring non-sentences to have nevertheless
> semantics in the language is just nonsensical.

It's not typically done, but it is far from nonsensical.

It's along the same lines as dynamic type checking in Lisp.  The code
        (+ 3 "hello")
can't result in a meaningful value, but it is still given semantics in Lisp
and implementations are required to abide by its well-defined behavior.

Programming language designers rarely bother giving semantics to syntax
errors, but there's no reason in principle why they couldn't.  Much along
the lines of the way Netscape/Firefox/IE attempt to deal with malformed
HTML out there in the world, still rendering a reasonable web page from the
source.

>> Why must such a program "not exist" or "have no semantics"?
>
> If you don't know that, I suspect you are not in a position to speak
> for "sophisticated programmers" and "highly skilled developers".

Another alternative: you are mistaken that it is impossible to assign
semantics to such things.

>> Only because you are arbitrarily choosing to define it so.
>
> No, not *arbitrarily*. There's a lot of math/theory involved here, but some
> LISP programmers do not seem to care. They type their parens and look what
> "happens when I run it".

And you again confuse fans of dynamic typing with people who don't care about
semantics.  You persist in your narrow view that there are no possible issues
in programming languages besides what you know from your world of static
typing.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
Minds, like parachutes, only function when they are open.
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186566805.471257.83720@b79g2000hse.googlegroups.com>
On 8 Aug., 01:44, Don Geddis <····@geddis.org> wrote:
> Ingo Menger <···········@consultant.com> wrote on Tue, 07 Aug 2007:
>
> >> > This statement reveals that you are in fact ignorant of how strongly
> >> > typed languages work. That set of programs simply does not exist - no
> >> > more than the set of programs with syntax errors.
>
> >> You're just arbitrarily defining the only valid programs to be those that
> >> pass the static type checker.
>
> > No, it's not me, it's the specifications of the languages in question.
>
> We're attempting here to have a larger discussion.  Not about what any
> particular language does, but rather from the perspective of a designer of
> a new language.  At some point the designer faces a choice: should the
> language have a static type checker, or not?

I'd suggest he decides this not at some point, but in the very
beginning (after having comprehended the relevant literature about
type systems and how they are related to semantics, that is).
BTW, I am wondering whether perl6 will be a success. Apparently, some
attempts have been made to include a form of static type checking, but
given the huge perl5 legacy, it is of course impossible to abandon
dynamic type checking. I have my doubts that this will work out very
well.

> It is simply false that the only possible valid programs, in some
> hypothetical new programming language, are those that can be typed by a
> static type checker.  It's trivial to construct examples of programs in
> pseudocode that can be assigned useful semantics, but cannot be type checked
> by static type checkers.

Granted.

> In fact, this is probably the core debate on this whole thread: that the fans
> of static type checking can't even conceive of useful programs which are not
> typeable by their static checkers.

You don't seem to read my postings, where I admitted this repeatedly.

> > Syntax analysis is the process that recognizes sentences of a language, and
> > rejects non-sentences. Requiring non-sentences to have nevertheless
> > semantics in the language is just nonsensical.
>
> It's not typically done, but it is far from nonsensical.

Can't you comprehend a reasoning that simple?
Why, do you think, does one define a grammar in the first place?

> Programming language designers rarely bother giving semantics to syntax
> errors, but there's no reason in principle why they couldn't.  Much along
> the lines of the way Netscape/Firefox/IE attempt to deal with malformed
> HTML out there in the world, still rendering a reasonable web page from the
> source.

So you advocate, for example, for a LISP compiler to insert missing
closing parens or to remove some that are in excess?
This, together with EVIL and hot patching and DO-WHAT-I-MEAN-semantics
but without static type checking surely would be the next big leap to
secure, reliable software. ;-/

> Another alternative: you are mistaken that it is impossible to assign
> semantics to such things.

The problem is that it is just too easy to "assign" semantics to
anything. The question is whether this is useful, consistent,
theoretically founded, orthogonal with the rest of the system and pays
off in the long run.
One could advocate, for example, that it should be possible to add a
floating point number to a pointer in C. (Then one could write
(3.1415+"baz"))
No problem here to "assign" one of many possible semantics for that.
But, incidentally, this wouldn't make the language any better.

Your taste may vary, but for me, the following holds: I'll never want
to add a number to a string, I'll never want to select the second
tuple element from a list, I see a tuple of lists to be different from
a list of tuples and so forth. Therefore, I am grateful when the
compiler points me to code that accidentally confuses lists and tuples,
strings and numbers etc., for this surely indicates a slip or even a
conceptual error on my side.

Sure, it would be possible for a compiler to continue and produce some
runnable artifact even in the presence of type errors. The compiler
could just replace the definition of the offending function by
something like:
  (\a -> error "type error on line 42 of frobnicate.source")
having type
  forall a b.a -> b
But, honestly, I feel better when such is not possible, for abusable
features will be abused sooner or later and then we end up having
unreliable libraries where such crap is lurking in the dark and is
waiting for the day it can exercise its "assigned" semantics ...
From: Andrew Reilly
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <pan.2007.08.08.10.17.45.355636@areilly.bpc-users.org>
On Wed, 08 Aug 2007 02:53:25 -0700, Ingo Menger wrote:

> One could advocate, for example, that it should be possible to add a
> floating point number to a pointer in C. (Then one could write
> (3.1415+"baz"))
> No problem here to "assign" one of many possible semantics for that.
> But, incidentally, this wouldn't make the language any better.

Orthogonal to the issue under debate here, but that's certainly possible
in both Forth and (most) assemblers.  There are quite a lot of variants on
this sort of theme that are quite (or even extremely) useful.  Usually on
small-scale, low-level platform-specific sorts of situations of course. 
The semantics can very well defined (they're defined exclusively by the
operator, rather than the operands, and so in the case that you mention,
the result is unlikely to be useful as either a floating point number or a
pointer, but you might be able to make some use of it as an unsigned
integer.  Not recommended.  Don't try this at home.  YMMV. etc...)

 :-)  (The grin here is because I imagine you rolling on the ground and
frothing at the mouth on reading this...)

Cheers,

-- 
Andrew
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186570907.182312.286180@57g2000hsv.googlegroups.com>
On 8 Aug., 12:17, Andrew Reilly <···············@areilly.bpc-
users.org> wrote:
> On Wed, 08 Aug 2007 02:53:25 -0700, Ingo Menger wrote:
> > One could advocate, for example, that it should be possible to add a
> > floating point number to a pointer in C. (Then one could write
> > (3.1415+"baz"))
> > No problem here to "assign" one of many possible semantics for that.
> > But, incidentally, this wouldn't make the language any better.
>
> Orthogonal to the issue under debate here, but that's certainly possible
> in both Forth and (most) assemblers.

To be honest, I'm not that sure that this is really impossible in C.
Somehow I suspect some C compiler might (even silently) interpret it
as ((int) 3.1415 + "baz")

>  :-)  (The grin here is because I imagine you rolling on the ground and
> frothing at the mouth on reading this...)

No problem. Nothing like that can frighten me anymore, you know. I am
occasionally reading comp.lang.c when I feel like horror ... :)
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1u643r2vji.fsf@hod.lan.m-e-leypold.de>
Ingo Menger wrote:

>> Why must such a program "not
>> exist" or "have no semantics"?
>
> If you don't know that, I suspect you are not in a position to speak
> for "sophisticated programmers" and "highly skilled developers".

Unfortunately that is what this discussion boils down to: Whom I'm
talking to and does "the other side" have the necessary requirements to
understand what I'm saying. I'm not having a PHD in CS (as Don
does, BTW), but I'm appalled at the misconceptions I continue to hear
about how languages are specified and which kind of statements on
programs make sense. I would have expected that insight in the
framework of programming language semantics (perhaps denotational) and
static typing would be part of every curriculum these days. Mind you:
Not that people are forced to buy into the conviction that those will
help them or be better in some way than alternative approaches: Just
understand the way the internal logic of those subject areas works to
be able to talk to other people from those fields.

Regards -- Markus
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <n1odhk6xx0.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:

> You say this, apparently without understanding the costs imposed by static
> typing, and the benefits derived by dynamic typing.  Your only understanding
> seems to be about the benefits of static typing.  

> We all admit there are some benefits.

No, you don't: With you and Rayiner this sentence is just a figure of
speech to avoid listening what you get told: "Yes, yes, we know, but
...".


>> The undecidability of certain type systems with regard to type inference
>> does not mean it's useless.

> Of course not.  What it means is that there may be some useful programs
> that the static type checker cannot prove to be type safe.

Inference != proof. You grok that?

> Dynamic typing fans want to run these programs anyway.  

You can run those programs, no sweat. I've actually written a compiler
which compiles non statically typed programs to some executable. If
you run it, it deletes all your files, scratches your CDs and tries to
hack into strategic command to start a nuclear war. From what we
understand that conforms to the language specification (minus the clause
that badly typed programs should not compile, but you ordered the
removal of that).


> Static typing fans want to force the programmer to annotate the
> program so that the checker is able to complete its proofs.

I took the liberty not to quote the rest of the post to spare you the
embarrasment to see your revealing remarks repeated -- revealing
concerning your degree of understanding typing and programming
language specification.

Oh, perhaps just only one thing:

> Can't you at least try for a little empathy?  

No.

Regards -- Markus
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87myx3uef7.fsf@geddis.org>
·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) wrote on Tue, 07 Aug 2007:
>> there may be some useful programs that the static type checker cannot
>> prove to be ["well typed"].  Dynamic typing fans want to run these
>> programs anyway.
>
> You can run those programs, no sweat. I've actually written a compiler
> which compiles non statically typed programs to some executable. If
> you run it, it deletes all your files, scratches your CDs and tries to
> hack into strategic command to start a nuclear war. From what we
> understand that conforms to the language specification (minus the clause
> that badly typed programs should not compile, but you ordered the
> removal of that).

Are you deliberately misunderstanding, or just honestly confused?

There are many languages in the world.  Some have dynamic type checking,
some have static type checking, some do almost no type checking.  Each
language has its own semantics (or none at all).

You can take a dynamically checked language like Lisp.  There are lots of
valid programs you can write in that language, that all have perfectly well
understood semantics provided by the language.

Now, someone comes along and says "your language would be improved if you
added a static type checker".  The language designer considers modifying his
language to become a new language, very much like the dynamic language it
used to be, but now with the addition of a static type checker.

Ah, but then he notices a snag.  There used to be some valid programs with
valid semantics in the original dynamic language, which are unable to be
well typed by the static type checker.  Moreover, some of these are useful
programs, and the community currently using the original dynamic language
would (rightfully!) complain if they were forced to transition to the new
language with static checking, at the cost of giving up those useful programs.

Summary: just because your static type checker is unable to type a program,
doesn't mean that any behavior at all from the resulting code is acceptable.

You constantly ignore the demonstrated fact that it is possible to give a
useful semantics to useful programs which cannot be statically type checked.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
Ah, women.  They make the highs higher and the lows more frequent.
	-- Friedrich Nietzsche
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bi0ol87e148d6@corp.supernews.com>
Don Geddis wrote:
> Of course not.  What it means is that there may be some useful programs
> that the static type checker cannot prove to be type safe.
> 
> Dynamic typing fans want to run these programs anyway.  Static typing fans
> want to force the programmer to annotate the program so that the checker
> is able to complete its proofs.

As I've said elsewhere, this is a definition rather than an argument. You
can run dynamically typed programs through a type checker at compile time
to try to find bugs but you can still run the program even if the static
type checker fails to prove type correctness.

> Take your example program with a syntax error somewhere.  What if that
> syntax
> error was in the middle of unreachable code?  It appears in the source,
> but
> no execution of the program will ever reach it.  Why must such a program
> "not
> exist" or "have no semantics"?  Only because you are arbitrarily choosing
> to
> define it so.  Some of us can see a bigger picture here.

If the code is never run and can never work, why give it to a compiler?

> Surely you can see that an easy alternate response is that EVAL is very
> useful,

What do you use EVAL for that cannot be implemented trivially (or isn't
already implemented) in OCaml?

> and if I have to make a choice I'd rather give up the static type  
> checker than give up EVAL?

You can implement EVAL as an interpreter or you can use run-time code
generation in a statically typed language like MetaOCaml.

> with all of them.  Wanting to run such a program is NOT evidence that a
> programmer "doesn't really know what he is doing", as you claim.  That
> claim only shows that you don't understand the limitations of static
> typing.

You are asking for a language that allows you to feeds its compiler both
correct programs and gibberish that happens to parse.

Out of curiosity, if your favorite dynamic language was extended to allow
lexical and parse errors in "code that can never be run", would you be
ecstatic because it was suddenly even more powerful and expressive,
allowing an even wider variety of non-sensical gibberish to be fed into the
compiler?

> Can't you at least try for a little empathy?  Try to imagine that these
> experienced programmers might actually have found a little value in EVAL,
> and that they naturally might chafe at the imposition of a new system
> (static typing) which eliminates this useful feature?

Why do you say that static typing eliminates EVAL?

> I'm just trying to get you to see that there is a cost to requiring static
> type checking.  And that there is an alternative programming language
> design (dynamic type checking) which doesn't impose this cost.

You don't seem to mention the costs of dynamic typing, such as awful
performance?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87wsw6u3k6.fsf@geddis.org>
Jon Harrop <···@ffconsultancy.com> wrote on Wed, 08 Aug 2007:
> You can run dynamically typed programs through a type checker at compile
> time to try to find bugs but you can still run the program even if the
> static type checker fails to prove type correctness.

Yes, I agree.  And I think that's a great direction to head for programming
language design.

>> Take your example program with a syntax error somewhere.  What if that
>> syntax error was in the middle of unreachable code?  It appears in the
>> source, but no execution of the program will ever reach it.  Why must such
>> a program "not exist" or "have no semantics"?  Only because you are
>> arbitrarily choosing to define it so.  Some of us can see a bigger picture
>> here.
>
> If the code is never run and can never work, why give it to a compiler?

The idea was that there is a large and complex program, the vast majority
of which has valid syntax and does something useful, but one tiny corner has
an error of some kind (for example, a syntax error or a type error).

The point, naturally, is to explore the execution code paths other than the
ones that would hit the error.

> What do you use EVAL for that cannot be implemented trivially (or isn't
> already implemented) in OCaml?

I don't know OCaml, so I couldn't tell you.

Pascal's example was to redefine existing classes (aka structures/records),
adding additional fields -- at runtime, after the program has already been
compiled.  And after the creation of a data structure which is an instance
of that class.

Redefining functions (especially with different type signatures) is another
example.

I would expect that OCaml, being in the "proudly statically typed!" camp,
would choose not to offer language features like these that render the vast
bulk of compile-time type inference useless.  (Well, to be fair: render the
inference as "advisory/warnings" rather than "cannot run/no semantics/errors".)

> Out of curiosity, if your favorite dynamic language was extended to allow
> lexical and parse errors in "code that can never be run", would you be
> ecstatic because it was suddenly even more powerful and expressive,
> allowing an even wider variety of non-sensical jibberish to be fed into the
> compiler?

That particular example was for the sake of argument, and no, it isn't
especially useful.

But the general design idea of a language trying to do its best with whatever
the programmer gives it, and especially one that allows execution of
error-free paths even if other non-executed paths might fail, and for that
matter one with a debugger that allows you to repair a program in the running
image instead of only by making source code modifications followed by
recompiling; yes, all those things I find quite valuable.

> Why do you say that static typing eliminates EVAL?

As you say (elsewhere), not in the general sense that SBCL's Common Lisp
implementation is already doing technical "static typing".

But in the sense meant in this thread, as evidenced by all the languages that
claim to be in the static typing camp as opposed to the dynamic typing camp;
those languages wish to prevent programs from being executed unless their
type checkers can verify that all expressions maintain their type contracts.

EVAL allows one to redefine functions during execution, including changing
their type signatures, so it goes completely against the wishes of the
pro-static typing community to absolutely eliminate runtime type errors via
compile-time type checking.

But you're right, SBCL is surely evidence that you can have a kind of static
type checking even in a language (Common Lisp) that has EVAL.  I doubt the
"pro-static type" fans in this thread would count that, though.  I'll leave
it to them as to whether they want to adopt Lisp as an equal member of the
static typing family.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
Children are natural mimics who act like their parents despite every effort to
teach them good manners.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xejieog32.fsf@ruckus.brouhaha.com>
Don Geddis <···@geddis.org> writes:
> The idea was that there is a large and complex program, the vast majority
> of which has valid syntax and does something useful, but one tiny corner has
> an error of some kind (for example, a syntax error or a type error).

Why not fix the error?  Do you want your languages to also be able to
run code that has syntax errors?  It not, why should type errors be
different?

> The point, naturally, is to explore the execution code paths other than the
> ones that would hit the error.

Why so impatient to run the code with a syntax (or type error)?  How
hard is it to fix, possibly by writing a 1-line stub that signals an
error?  Of the various justifications I've been hearing for dynamic
types, wanting to run code with known compile-time errors is just
about the worst.

> Redefining function (especially with different type signatures) is another
> example.

In GHCI 6.4.2:

    Prelude> let double x = x + x
    Prelude> double 3
    6
    Prelude> let double x = (x, x)     -- new type signature
    Prelude> double 3
    (3,3)
    Prelude>

I'm not sure if that satisfies what you're describing though.

> I would expect that OCaml, being in the "proudly statically typed!"
> camp, would choose not to offer language features like these that
> render the vast bulk of compile-time type inference useless.  (Well,
> to be fair: render the inference as "advisory/warnings" rather than
> "cannot run/no semantics/errors".)

I don't know OCaml but I think Ingo was saying there is a fundamental
reason in the semantics of these languages, that type-incorrect
programs can't mean anything and that type errors had to be treated
like syntax errors.  I've been wondering why that might be (it's not
like that in C, where you usually parse the whole function or
expression before doing type assignment).  He referred to Pierce's
book about types, which I have a copy of but haven't had a chance to
look there.

One issue that comes to mind is if a unification-based inferencer
can't resolve the types of two expressions, it might not know which type
is the right one.  In that case maybe it could proceed if you add an
annotation (it would generate code for something with the type you
specify, but that would signal an error on evaluation).  Adding an
annotation where the error is doesn't seem like a terrible imposition
if you want to compile an error on purpose.

> fail, and for that matter one with a debugger that allows you to
> repair a program in the running image instead of only by making
> source code modifications followed by recompiling; yes, all those
> things I find quite valuable.

Sure, this is fine, I'm happy with anything that gives me more
leverage to debug.  But I also want a compiler setting that says "I'm
done debugging, this is the real thing, and if there is the slightest
error including type errors, then the program should be considered
broken".  That could be considered a zeroth order unit test, which any
code checked into a production tree is required to pass, but which
isn't required for debugging.
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87zm120xdu.fsf@geddis.org>
Paul Rubin <·············@NOSPAM.invalid> wrote on 07 Aug 2007 20:3:
> Don Geddis <···@geddis.org> writes:
>> The point, naturally, is to explore the execution code paths other than the
>> ones that would hit the error.
>
> Why so impatient to run the code with a syntax (or type error)?

I'm trying to maximize the efficiency of the skilled programmer.  They are
the best judge of where their intellectual effort should be going at any
particular point in time.

This eventually becomes an argument about premature optimization.  Surely
it's fairly well established by now that a good programmer ought to think
about software in an order like this first:
1. does the design meet the customer's needs?
2. is the code actually correct?  does it do what it is supposed to do?
3. is it fast/efficient enough?

My reaction to your question is much the same as if you had said that some
compiler's language forces me to decide on whether to use an array or a list
for some data structure, when at that point I really ought to be concerned
with whether the code is returning correct results or not.

This is also related to exploratory prototype programming.  There is a place
for dotting all the i's and crossing all the t's, but that place is generally
at the end of programming, not at the beginning.

> But I also want a compiler setting that says "I'm done debugging, this is
> the real thing, and if there is the slightest error including type errors,
> then the program should be considered broken".  That could be considered a
> zeroth order unit test, which any code checked into a production tree is
> required to pass, but which isn't required for debugging.

I agree that such a setting is an excellent idea.  I'd love for any of my
programming languages/environments to have such a tool, for final checking
of code before delivery to some customer (user, source control, etc.).

(At the same time, I want to be able to turn such strict checking off during
initial development.)

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
Q. What is the similarity between an elephant and a grape?
A. They are both purple...except for the elephant.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xejidcibx.fsf@ruckus.brouhaha.com>
Don Geddis <···@geddis.org> writes:
> This is also related to exploratory prototype programming.  There is a place
> for dotting all the i's and crossing all the t's, but that place is generally
> at the end of programming, not at the beginning.

I agree with you about this, and for prototyping sometimes you can
toss maintainability as well.  So I think it's fine to prototype in
whatever language you find easiest (Python for me, Excel spreadsheets
for some other folks I know) and then do the real implementation
completely from scratch using the prototype as an experience base and
as a way to guide a specification.  The real implementation can be in
a completely different language if appropriate.
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hvt5lF3l9lvtU1@mid.individual.net>
Paul Rubin wrote:
> Don Geddis <···@geddis.org> writes:
>> This is also related to exploratory prototype programming.  There is a place
>> for dotting all the i's and crossing all the t's, but that place is generally
>> at the end of programming, not at the beginning.
> 
> I agree with you about this, and for prototyping sometimes you can
> toss maintainability as well.  So I think it's fine to prototype in
> whatever language you find easiest (Python for me, Excel spreadsheets
> for some other folks I know) and then do the real implementation
> completely from scratch using the prototype as an experience base and
> as a way to guide a specification.  The real implementation can be in
> a completely different language if appropriate.

Wouldn't it be much better if you could do both jobs in the same language?


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xfy2t9kbc.fsf@ruckus.brouhaha.com>
Pascal Costanza <··@p-cos.net> writes:
> > I agree with you about this, and for prototyping sometimes you can
> > toss maintainability as well.  So I think it's fine to prototype in
> > whatever language you find easiest (Python for me, Excel spreadsheets
> > for some other folks I know) and then do the real implementation
> > completely from scratch using the prototype as an experience base and
> > as a way to guide a specification.  The real implementation can be in
> > a completely different language if appropriate.
> 
> Wouldn't it be much better if you could do both jobs in the same language?

Shrug.  If I want to go down the street, I use a bicycle.  If I want
to go to another country, I use an airplane.  Wouldn't it be much
better if I could go both places on the same vehicle?  Only if the
trade-offs are favorable.
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186666758.173771.186820@k79g2000hse.googlegroups.com>
On Aug 9, 10:35 am, Paul Rubin <·············@NOSPAM.invalid> wrote:
> Pascal Costanza <····@p-cos.net> writes:
> > > I agree with you about this, and for prototyping sometimes you can
> > > toss maintainability as well.  So I think it's fine to prototype in
> > > whatever language you find easiest (Python for me, Excel spreadsheets
> > > for some other folks I know) and then do the real implementation
> > > completely from scratch using the prototype as an experience base and
> > > as a way to guide a specification.  The real implementation can be in
> > > a completely different language if appropriate.
>
> > Wouldn't it be much better if you could do both jobs in the same language?
>
> Shrug.  If I want to go down the street, I use a bicycle.  If I want
> to go to another country, I use an airplane.  Wouldn't it be much
> better if I could go both places on the same vehicle?  Only if the
> trade-offs are favorable.

The better analogy *might-be*:  using clay model for a prototype,
because clay models are easy to make, while using stainless steel for
the real product, because stainless steel is durable. If we change
easy to make with RAD &/or prototyping, and durability with whatever
production quality our product must have like fast, reliable, small
size, etc., then Pascal's question from this analogy's perspective would
sound like :  Wouldn't it be much better if you could do both
(prototype and real product)  with the same material? Like having a
material that makes both easy to prototype with and deliver it?
The benefit might be being able to reuse larger percentage of your
prototype code, saving  yourself from rewriting or even redesigning
things from one language to the other, not to mention situations
where prototyping language has features that production language
doesn't have at all, that would force you into redesigning the engine
or even worse like rethinking the whole architecture. Like going from
functional to OO language. Separation of  prototyping & production
language makes using Tracer Bullets (http://www.artima.com/intv/
tracer.html) nearly impossible. The other question is multi stage
development, you want something more than version 1.0. What will
happen after you finish the v1.0 in the production language and you need
to gather knowledge/ideas/experience of another area of your product which
works closely intermingled with rest of your code.  You don't have the
whole functionality implemented in prototype language as there is only
a small subset (if any), so you're faced with writing an interface to
your existing code, pretend that functionality exists ( if domain
allows) or the famous pen-and-paper design. On the other hand, folks who
choose same language for both prototyping & production could just
continue to code.
In my opinion the benefits outweigh the cost by far but of course
your mileage may vary.

Slobodan
From: Duane Rettig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <o0odhgtxm2.fsf@gemini.franz.com>
Paul Rubin <·············@NOSPAM.invalid> writes:

> Pascal Costanza <··@p-cos.net> writes:
>> > I agree with you about this, and for prototyping sometimes you can
>> > toss maintainability as well.  So I think it's fine to prototype in
>> > whatever language you find easiest (Python for me, Excel spreadsheets
>> > for some other folks I know) and then do the real implementation
>> > completely from scratch using the prototype as an experience base and
>> > as a way to guide a specification.  The real implementation can be in
>> > a completely different language if appropriate.
>> 
>> Wouldn't it be much better if you could do both jobs in the same language?
>
> Shrug.  If I want to go down the street, I use a bicycle.  If I want
> to go to another country, I use an airplane.  Wouldn't it be much
> better if I could go both places on the same vehicle?  Only if the
> trade-offs are favorable.

Absolutely.  In current movie terms, Common Lisp, being the
language-building language, is the ultimate Transformer.  A
transformer has a basic shape of an auto [1] it assumes in order not
to stand out too much but it can be magically transformed into
any number of mechanical things.  It can even transform itself into
something after touching that thing.

We never see who it was that programmed these transformers; presumably
they died on their planet and the transformers survive by
reprogramming and reshaping themselves.  So in that area, Common Lisp
is behind, because it always needs the Allspark cube (the essence of
us programmers) for activation; it hasn't yet learned to love without
us.

Now there are those on this thread who have scoffed at the idea of a
language actually being transformed to suit the requirements of the
task.  The thread is much too long, so I can't (i.e. won't bother to)
cite actual articles, but you know who you are - you're likely proud
of your disdain for these impure, transforming languages.  But this is
exactly the kind of thing we Lispers live for; forget purity; give me
Power!   You want a bicycle for street travel and a jet for
cross-country?  How about both, in one transforming package?

===

[1] the good transformers seemed to be mostly cars, but there is not a
perfect correlation; the closer correlation seemed to be that of the
bad transformers representing authority, like police and army.

-- 
Duane Rettig    ·····@franz.com    Franz Inc.  http://www.franz.com/
555 12th St., Suite 1450               http://www.555citycenter.com/
Oakland, Ca. 94607        Phone: (510) 452-2000; Fax: (510) 452-0182   
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5i06atF3mq0l6U1@mid.individual.net>
Paul Rubin wrote:
> Pascal Costanza <··@p-cos.net> writes:
>>> I agree with you about this, and for prototyping sometimes you can
>>> toss maintainability as well.  So I think it's fine to prototype in
>>> whatever language you find easiest (Python for me, Excel spreadsheets
>>> for some other folks I know) and then do the real implementation
>>> completely from scratch using the prototype as an experience base and
>>> as a way to guide a specification.  The real implementation can be in
>>> a completely different language if appropriate.
>> Wouldn't it be much better if you could do both jobs in the same language?
> 
> Shrug.  If I want to go down the street, I use a bicycle.  If I want
> to go to another country, I use an airplane.  Wouldn't it be much
> better if I could go both places on the same vehicle? 

Sure.

> Only if the trade-offs are favorable.

Exactly.

The advantage of having one language in which you can do both is that it 
becomes easier to do the transition from a prototype to a real 
implementation gradually, so you can keep a fully working version all 
the time while you are doing the transition.

So what should a language look like in which you can do both?


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xps1xuiej.fsf@ruckus.brouhaha.com>
Pascal Costanza <··@p-cos.net> writes:
> The advantage of having one language in which you can do both is that
> it becomes easier to do the transition from a prototype to a real
> implementation gradually, so you can keep a fully working version all
> the time while you are doing the transition.
> 
> So what should a language look like in which you can do both?

I don't find that transition happens like that.  The prototype is full
of false starts and terrible algorithms.  The real one is organized
completely differently and planned from the start.  It doesn't matter
whether they're in the same language or different languages.  I
prototyped something a couple months ago and it took about 30 hours to
run.  After a few runs I got tired of waiting that long so I rewrote it
totally differently based on preprocessing 30+ GB of data, so it now
takes only about 5 hours (only about 1 CPU hour though, it's mostly
network waits, so not worth trying to port to a compiled language or
something like that).  Both versions are in Python.  I didn't re-use
any significant amount of code.  Maybe I could have if I'd planned
ahead, but the whole idea of prototyping is to go where the results
take you, instead of planning.  So if there was some benefit to using
a different language for the second version then I would have done it.
Actually I'd say both versions are prototypes and the "real" real one
will be integrated into a different existing application (the one that
right now is at the other end of all that network traffic).  The other
application is in Java and is large and mature, so that dictates how
this thing is going to end up.  I really don't feel like I'd gain much
of anything by using Lisp for this stuff, besides which it's a
multi-person project and I'm the only person in the group who has done
any Lisp.
From: Matthias Benkard
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186657582.607263.91850@g4g2000hsf.googlegroups.com>
Hi,

> The real one is organized
> completely differently and planned from the start.  It doesn't matter
> whether they're in the same language or different languages.

It sure is useful to have a prototyping language that supports the
same modelling style as the "real" one.

For example, some kind of a "Prototyping Haskell" would be nice.  But
what you'd end up with is essentially Haskell without the rigidity in
compile-time checks.  So why not simply add an option to the Haskell
compiler that makes the static type checks lenient?

I just don't see the problem here.  Why does a static type checker
_have_ to force me to go out of my way to silence it?  Why no "--i-
know-what-i-am-doing" compiler option that defers type safety (not
type _checking_ -- warnings are good, aborting the compiler is what's
bad) to run-time?

Mata ne,
Matthias
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bmlmipr1hg5f3@corp.supernews.com>
Matthias Benkard wrote:
> Why no "--i- 
> know-what-i-am-doing" compiler option that defers type safety (not
> type _checking_ -- warnings are good, aborting the compiler is what's
> bad) to run-time?

Just replace unbound functions with a stub that raises a run-time exception:

  let foo _ = invalid_arg "foo"

You automate that with a macro easily enough. I don't know of anyone having
done this though: users apparently have no need of it.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x1weag6qg.fsf@ruckus.brouhaha.com>
Matthias Benkard <··········@gmail.com> writes:
> For example, some kind of a "Prototyping Haskell" would be nice.  But
> what you'd end up with is essentially Haskell without the rigidity in
> compile-time checks.  So why not simply add an option to the Haskell
> compiler that makes the static type checks lenient?

Basically I think there are technical reasons preventing this, that
might be surmountable, but Haskell users have not especially wanted
anything like that.

> I just don't see the problem here.  Why does a static type checker
> _have_ to force me to go out of my way to silence it?  Why no "--i-
> know-what-i-am-doing" compiler option that defers type safety (not
> type _checking_ -- warnings are good, aborting the compiler is what's
> bad) to run-time?

Can I ask if you actually use Haskell and/or have some knowledge of
it?  I'd put myself at the "some knowledge" level, i.e. I've been
playing with it and reading about it on and off, but have not yet done
anything nontrivial with it.  Anyway, at least based on my limited
understanding, what you're describing doesn't seem of much benefit,
since Haskell types are polymorphic.  So if you have a function

   f :: ThisType -> ThatType

and you say "f x" where x has the wrong type and the compiler
complains, you can replace x with (error "oops, x isn't implemented
yet") since the error function returns a completely general type and
thus the compiler will never flag it as a type error (it instead
raises a runtime exception).  I use this approach sometimes so maybe
that suffices for what you're asking. 

There is also something called unsafeCoerce# which does some kind of
scary madness at runtime that I don't understand, to convert one type
of object into another in a type-unsafe way.  I think it is
implemented as an FFI call.  Or even more extreme, there are packages
like Harpy, which lets you drop assembly code smack into the middle of
Haskell functions.  Obviously if you use stuff like this, Haskell's
type safety and runtime guarantees go out the window, but that seems
to be what you want.
From: Nicolas Neuss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87643q77c9.fsf@ma-patru.mathematik.uni-karlsruhe.de>
Paul Rubin <·············@NOSPAM.invalid> writes:

> Sure, this is fine, I'm happy with anything that gives me more
> leverage to debug.  But I also want a compiler setting that says "I'm
> done debugging, this is the real thing, and if there is the slightest
> error including type errors, then the program should be considered
> broken".  That could be considered a zeroth order unit test, which any
> code checked into a production tree is required to pass, but which
> isn't required for debugging.

You really should drop Python in favor of SBCL:-) There you can do, e.g.,

(let ((asdf::*compile-file-failure-behaviour* :error))
  (asdf:operate 'asdf:load-op :name-of-your-system))

And it will be quite picky when compiling your code.

Nicolas
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xodhiwg6j.fsf@ruckus.brouhaha.com>
Nicolas Neuss <········@mathematik.uni-karlsruhe.de> writes:
> You really should drop Python in favor of SBCL:-) There you can do, e.g.,
> 
> (let ((asdf::*compile-file-failure-behaviour* :error))
>   (asdf:operate 'asdf:load-op :name-of-your-system))
> 
> And it will be quite picky when compiling your code.

I sometimes regret never having been a serious Lisp hacker but I
really don't think it's the right thing any more.  I do sometimes kick
Python around (in favor of Lisp) in comp.lang.python the same way I've
gotten sucked into kicking Lisp around on this thread (in favor of ML
or something like that).  I find Python more enjoyable to use than
Lisp most of the time, and the semantics are pretty similar (except
Python's are often broken).  

Python is great for lightweight hacks, exploratory programming or
prototyping, that sort of thing.  I've always felt it too loose to do
anything "dangerous" in and the same about Lisp.  I worked on a couple
of financial projects prototyping in Python with the idea of
reimplementing in Java (server side) and C (AUGGGH!!)  (small embedded
client) once the Python prototype helped iron out the feature set and
specs, and the prototype approach was very successful.  But I would
have been quite uncomfortable with the idea of deploying a security
application in Python that was protecting real money.

The C thing was supposed to run in a small embedded ARM cpu, and if it
will help me out of the Lisp doghouse, my intention was to embed
Hedgehog Lisp (a small functional Lisp dialect) in the application to
code the higher level protocols, user interface, etc. in order to
minimize the amount of new C code in the project.  The project was
cancelled due to company management reconfiguration before we reached
that phase.

Right now I write standalone Python code all day to flog around piles
of data that come from trusted sources and from other code that I
wrote, so there aren't intense requirements for security or
reliability and Python is fine (except for being so slow).  This is
for use behind a web server written in Python that is (for now) buggy
as hell ;-).  But there is not any serious amount of money or safety
at stake if something goes wrong with this app, so Python is ok.

I started hanging out on clf due to general dissatisfaction with all
the languages I knew anything about and with the hope of migrating to
something more modern.  I've learned a tremendous amount here and I'm
very grateful to all the posters including the ones I get in fights
with ;-).
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5hti45F3lofk6U1@mid.individual.net>
Paul Rubin wrote:
> Nicolas Neuss <········@mathematik.uni-karlsruhe.de> writes:
>> You really should drop Python in favor of SBCL:-) There you can do, e.g.,
>>
>> (let ((asdf::*compile-file-failure-behaviour* :error))
>>   (asdf:operate 'asdf:load-op :name-of-your-system))
>>
>> And it will be quite picky when compiling your code.
> 
> I sometimes regret never having been a serious Lisp hacker but I
> really don't think it's the right thing any more. 

I have the impression that you're underestimating Lisp (but that's just 
a guess, I may be wrong). Yes, at a superficial level, Python and Lisp 
are quite similar. But they're not.

For a different perspective, browse around 
http://www.cs.utexas.edu/users/moore/acl2/ for some time, check out the 
examples, skim through some papers, etc.

Another interesting project is Qi - see 
http://www.lambdassociates.org/aboutqi.htm

The idea here is to show you that Lisp is not just a language, but a 
language framework. Yes, you can use it as a plain programming language, 
and many use it as such, but that's not the whole picture.

If you believe in single-paradigm languages, which provide a minimal set 
of features that support exactly one programming style and nothing else, 
this will probably not work for you, though. The strength of Lisp is 
that it can support a broad range of different programming styles within 
the same framework. The examples of ACL2 and Qi show that this even 
includes statically checked sublanguages.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Daniel C. Wang
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46B9B52E.2070103@gmail.com>
Pascal Costanza wrote:
{stuff deleted}
> I have the impression that you're underestimating Lisp (but that's just 
> a guess, I may be wrong). Yes, at a superficial level, Python and Lisp 
> are quite similar. But they're not.

I believe you have forgotten the historical roots of Lisp and its 
relation to the lambda calculus. The *untyped* lambda calculus was a 
mistake of Church's; he didn't realize the paradoxes it could cause in the 
logic. So he added strong typing to get rid of them.

Typed lambda calculi are a more interesting framework to study and use 
as a basis for languages than untyped lambda calculi.

http://en.wikipedia.org/wiki/Lambda_calculus
http://en.wikipedia.org/wiki/Typed_lambda_calculus
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5htrbmF3lo60lU1@mid.individual.net>
Daniel C. Wang wrote:
> Pascal Costanza wrote:
> {stuff deleted}
>> I have the impression that you're underestimating Lisp (but that's 
>> just a guess, I may be wrong). Yes, at a superficial level, Python and 
>> Lisp are quite similar. But they're not.
> 
> I believe you have forgotten the historical roots of Lisp and its 
> relation to the lambda calculus. The *untyped* lambda calculus was a 
> mistake of Church's; he didn't realize the paradoxes it could cause in the 
> logic. So he added strong typing to get rid of them.

Yes, programming on nothing but paper can be quite hard. ;)


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x3ayudwvu.fsf@ruckus.brouhaha.com>
Pascal Costanza <··@p-cos.net> writes:
> >> I have the impression that you're underestimating Lisp (but that's
> >> just a guess, I may be wrong). Yes, at a superficial level, Python
> >> and Lisp are quite similar. But they're not.
> > I believe you have forgotten the historical roots of Lisp and its
> > relation to the lambda calculus. The *untyped* lambda calculus was a
> > mistake of Church's; he didn't realize the paradoxes it could cause in
> > the logic. So he added strong typing to get rid of them.

Python and Lisp are both latently typed with runtime tags.  Python is
sort of a semi-braindead incarnation of Scheme, but with some nice
syntax sugar and a simple OO system and a cheesy coroutine-like setup
(generators) that together make it very refreshing and pleasant to
program in unless you're trying to do something difficult.  I'm well
aware that it's not anything like a full scale CL implementation.  I
remember thinking when I started using it, that I felt the same way
that the 1970's Maclisp hackers must have felt.
From: Daniel C. Wang
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <46B9D742.9010502@gmail.com>
Pascal Costanza wrote:
{stuff deleted}
> 
> Yes, programming on nothing but paper can be quite hard. ;)
> 

I'm always tempted to teach a 1st year programming course that way! I've 
seen way too many students submit homework assignments that "worked" by 
discovering them via trial and error.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-5BADDE.12580908082007@news-europe.giganews.com>
In article <···············@mid.individual.net>,
 Pascal Costanza <··@p-cos.net> wrote:

> Paul Rubin wrote:
> > Nicolas Neuss <········@mathematik.uni-karlsruhe.de> writes:
> >> You really should drop Python in favor of SBCL:-) There you can do, e.g.,
> >>
> >> (let ((asdf::*compile-file-failure-behaviour* :error))
> >>   (asdf:operate 'asdf:load-op :name-of-your-system))
> >>
> >> And it will be quite picky when compiling your code.
> > 
> > I sometimes regret never having been a serious Lisp hacker but I
> > really don't think it's the right thing any more. 
> 
> I have the impression that you're underestimating Lisp (but that's just 
> a guess, I may be wrong). Yes, at a superficial level, Python and Lisp 
> are quite similar. But they're not.

I think that (commercial) systems like LispWorks or Allegro CL
offer a tremendous value. If you have ever used LispWorks
or Allegro CL for something serious you'll see the difference.
The Allegro CL success stories are a really good showcase
what is possible: http://www.franz.com/success/
Many implementations are improving right now. From the point
of having good implementations, Common Lisp now looks
really good. There are full IDEs (like LispWorks
or Allegro CL) and small footprint implementations (like CLISP),
there is an affordable version for Windows (Corman CL),
more specialized versions (like Scieneer CL).
 
Not that more progress is not possible, but a lot
of progress is happening right now as we discuss
here.

> For a different perspective, browse around 
> http://www.cs.utexas.edu/users/moore/acl2/ for some time, check out the 
> examples, skim through some papers, etc.

Theorem provers are in general an area where you'll find
several in Lisp, (S)ML, Haskell, Prolog or C.

> Another interesting project is Qi - see 
> http://www.lambdassociates.org/aboutqi.htm

Another interesting project to look at is Axiom. Axiom
is a more modern computer algebra system (originally developed
as Scratchpad by IBM). Its base ideas are more advanced than
from Mathematica or Maxima. Axiom has an interesting typed interactive
language in some ways similar to Haskell. Axiom is open source
since some time. Just a few weeks there was a fork of the Axiom
sources with the goal to develop a version that is
more friendly to the Lisp developer: FriCAS.

http://www.math.uni.wroc.pl/~hebisch/fricas.html

It now compiles with GCL, CLISP, SBCL and OpenMCL.

The original Axiom open source project has a bit different
goals (around literate programming) and I hope that
this will also continue.

> The idea here is to show you that Lisp is not just a language, but a 
> language framework. Yes, you can use it as a plain programming language, 
> and many use it as such, but that's not the whole picture.

In the AI groups Lisp was always seen as 'AI assembler'.

> 
> If you believe in single-paradigm languages, which provide a minimal set 
> of features that support exactly one programming style and nothing else, 
> this will probably not work for you, though. The strength of Lisp is 
> that it can support a broad range of different programming styles within 
> the same framework. The examples of ACL2 and Qi show that this even 
> includes statically checked sublanguages.
> 
> 
> Pascal

-- 
http://lispm.dyndns.org
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xwsw21qgw.fsf@ruckus.brouhaha.com>
Rainer Joswig <······@lisp.de> writes:
> > I have the impression that you're underestimating Lisp (but that's just 
> > a guess, I may be wrong). Yes, at a superficial level, Python and Lisp 
> > are quite similar. But they're not.
> 
> I think that (commercial) systems like LispWorks or Allegro CL
> offer a tremendous value. If you have ever used LispWorks
> or Allegro CL for something serious you'll see the difference.

There's no chance that I'd use one of these commercial systems without
a compelling business reason to do so.  I might download one of the
free systems to play around with sometime.  I played with CLISP a long
time ago and it was pretty nice as well as small.  Before that, I used
KCL for a small prototyping project (maybe 1 KLOC) mentioned in
another post.

I don't have the impression any of the software-only Lisp environments
of the 1990's approached anything near the whizziness of the Lisp
machines of a decade earlier.  Maybe they've caught up by now, but at
least as I see it, the excitement is gone and it's just bricklaying
now.

There is also this: http://www.unlambda.com/cadr/index.html

> Another interesting project to look at is Axiom. Axiom
> is a more modern computer algebra system (originally developed
> as Scratchpad by IBM). It's base ideas are more advanced than
> from Mathematica or Maxima. Axiom has an interesting typed interactive
> language in some ways similar to Haskell. Axiom is open source
> since some time. 

Axiom is open source now?!!  That's great news.  Is it written in CL?

> In the AI groups Lisp was always seen as 'AI assembler'.

Right, it still seems that way, Scheme as a minimalistic assembler and
CL as a baroque macroassembler with airbags and sirens.  Yes you can
implement anything else on top of it, but as someone once said of
Hermann Oberth(?), "if Oberth wants to drill a hole, first he invents
the drill press".  Or in mathematics, sure you can do calculus
starting with pure set theory or category theory, but it's easier to
stick with abstractions that are already developed by other people and
whose usefulness is demonstrated.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-5CAD52.10310911082007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Rainer Joswig <······@lisp.de> writes:
> > > I have the impression that you're underestimating Lisp (but that's just 
> > > a guess, I may be wrong). Yes, at a superficial level, Python and Lisp 
> > > are quite similar. But they're not.
> > 
> > I think that (commercial) systems like LispWorks or Allegro CL
> > offer a tremendous value. If you have ever used LispWorks
> > or Allegro CL for something serious you'll see the difference.
> 
> There's no chance that I'd use one of these commercial systems without
> a compelling business reason to do so.  I might download one of the
> free systems to play around with sometime.  I played with CLISP a long
> time ago and it was pretty nice as well as small.  Before that, I used
> KCL for a small prototyping project (maybe 1 KLOC) mentioned in
> another post.
> 
> I don't have the impression any of the software-only Lisp environments
> of the 1990's approached anything near the whizziness of the Lisp
> machines of a decade earlier.  Maybe they've caught up by now, but at
> least as I see it, the excitement is gone and it's just bricklaying
> now.
> 
> There is also this: http://www.unlambda.com/cadr/index.html
> 
> > Another interesting project to look at is Axiom. Axiom
> > is a more modern computer algebra system (originally developed
> > as Scratchpad by IBM). It's base ideas are more advanced than
> > from Mathematica or Maxima. Axiom has an interesting typed interactive
> > language in some ways similar to Haskell. Axiom is open source
> > since some time. 
> 
> Axiom is open source now?!!  That's great news.  Is it written in CL?

Yes.
 
> > In the AI groups Lisp was always seen as 'AI assembler'.
> 
> Right, it still seems that way, Scheme as a minimalistic assembler and
> CL as a baroque macroassembler with airbags and sirens.  Yes you can
> implement anything else on top of it, but as someone once said of
> Hermann Oberth(?), "if Oberth wants to drill a hole, first he invents
> the drill press".  Or in mathematics, sure you can do calculus
> starting with pure set theory or category theory, but it's easier to
> stick with abstractions that are already developed by other people and
> whose usefulness is demonstrated.

Paul, there is a lot of guessing and a lot of opinions in your
post without actual experience.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xabsysh33.fsf@ruckus.brouhaha.com>
Pascal Costanza <··@p-cos.net> writes:
> > I sometimes regret never having been a serious Lisp hacker but I
> > really don't think it's the right thing any more.
> 
> I have the impression that you're underestimating Lisp (but that's
> just a guess, I may be wrong).

I have no way to know if I'm underestimating Lisp, and no way to test
the theory other than spending a few years writing Lisp code
intensively and then deciding whether or not those years were wasted.
I'd consider this a high risk undertaking ;).  I was never a Lisp
expert but I think I'm not clueless about it.  Really I think the
heyday of Lisp was the Lisp machine era in the 1980's.

> For a different perspective, browse around
> http://www.cs.utexas.edu/users/moore/acl2/ for some time, check out
> the examples, skim through some papers, etc.
>
> Another interesting project is Qi - see
> http://www.lambdassociates.org/aboutqi.htm

Yes, I'm familiar with both of these.  ACL2 is very cool and I may try
it out sometime, though getting up to speed with any program like that
probably takes a lot of study, and right now I'm a bit more interested
in Coq.

> The idea here is to show you that Lisp is not just a language, but a
> language framework. Yes, you can use it as a plain programming
> language, and many use it as such, but that's not the whole picture.

Right, I understand this, I once prototyped a business app in CL
using read macros to create a DSL.  I then wrote an embedded
interpreter from scratch that shipped in the actual product.  It was
used both as a DSL for internal data in the app, and also as the
execution engine for a user-level scripting language (an unholy
mixture of Basic and Cobol, that got translated into S-expressions by
a yacc script, then interpreted by the Lisp evaluator).

But I think today I could have done the whole thing in Python a lot
more easily.  The main thing I needed at first was a way to represent
a bunch of data that had tree-like structure, so I wrote a Lisp reader
which was an obvious, no-brain-cells approach.  Then I wrote a Lisp
printer to debug the reader.  Having a reader and printer, it was
impossible to resist adding an evaluator and GC.  And having this
embedded language but knowing our customers would think we were crazy
if they knew what we were using under the covers, we wrapped syntax
sugar around the Lisp and described it as a script language.  Today,
Python would have served all the above purposes adequately well.

> If you believe in single-paradigm languages, which provide a minimal
> set of features that support exactly one programming style and nothing
> else, this will probably not work for you, though. The strength of
> Lisp is that it can support a broad range of different programming
> styles within the same framework. The examples of ACL2 and Qi show
> that this even includes statically checked sublanguages.

Well, sure, Liskell shows that it even includes Haskell, but really, I
think that ACL2 and Qi are only Lisp in a rather distant sense, while
Liskell is "syntax salt" (opposite of syntax sugar) on top of a
language with much different evaluation semantics from Lisp.  (A
couple of people mentioned YHC but really, it's not as if it simply
embeds Haskell by wrapping lambdas around all the terms for
call-by-name lazy evaluation; any non-toy Haskell evaluator has to use
graph reduction, so YHC has to be a full scale Greenspun).
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-736A70.11071811082007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Pascal Costanza <··@p-cos.net> writes:
> > > I sometimes regret never having been a serious Lisp hacker but I
> > > really don't think it's the right thing any more.
> > 
> > I have the impression that you're underestimating Lisp (but that's
> > just a guess, I may be wrong).
> 
> I have no way to know if I'm underestimating Lisp, and no way to test
> the theory other than spending a few years writing Lisp code
> intensively and then deciding whether or not those years were wasted.
> I'd consider this a high risk undertaking ;).  I was never a Lisp
> expert but I think I'm not clueless about it.  Really I think the
> heyday of Lisp was the Lisp machine era in the 1980's.

If you use a current Lisp system like SBCL, OpenMCL,
LispWorks, CLisp, Allegro CL, Corman CL, Scieneer CL and others
now, you don't care much about the Lisp Machine era. Most
people are now running Windows, some kind of Unix
or Linux on their computers.

LispWorks for example tries to support programming on
those current operating systems.
It has a portable IDE and a very nice base implementation.

LispWorks enables for example the creation of
'true' applications. For example, for InspireData
you would hardly guess that it is written in Lisp.
The app comes in similar versions for Windows and Mac OS X.
Here is a video explaining InspireData (QuickTime):
http://www.inspiration.com/quicktours/index.cfm?fuseaction=inspiredata_QT

-- 
http://lispm.dyndns.org
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xmywy1l3y.fsf@ruckus.brouhaha.com>
Rainer Joswig <······@lisp.de> writes:
> If you use a current Lisp system like SBCL, OpenMCL,
> LispWorks, CLisp, Allegro CL, Corman CL, Scieneer CL and others
> now, you don't care much about the Lisp Machine era. Most
> people are running now Windows, some kind o Unix
> or Linux on their computers.

I'm not sure what you mean by that.  I just mean the lisp machines had
a much nicer development/debugging environment than (e.g.) Franz even
quite a few years later.  Maybe the current stuff is even better than
the old lispm stuff (is it?  I'm interested to know and I'll trust
your opinion on that).  But I see that as just more improved creature
comforts, like making cars with better and better upholstery and
stereos, while keeping the drivetrain about the same as before.  The
interesting stuff happening in languages seems to be with typed
functional languages.

> LispWorks enables for example the creation of
> 'true' applications. For example for InspiteDate
> you would hardly guess that it is written in Lisp.
> The app comes in similar versions for Windows and Mac OS X.
> Here is video explaining InspireData (Quicktime):
> http://www.inspiration.com/quicktours/index.cfm?fuseaction=inspiredata_QT

I don't run Windows or OS X, and don't have the plugins to watch a
quicktime video, but yeah, it's cool if there's now good packaging
technology for Lisp programs.  I've done one Python program with real
Windows packaging (used InnoSetup, configured by a co-worker) and I
agree that it makes the results look more serious.  
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-B84D44.11312511082007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Rainer Joswig <······@lisp.de> writes:
> > If you use a current Lisp system like SBCL, OpenMCL,
> > LispWorks, CLisp, Allegro CL, Corman CL, Scieneer CL and others
> > now, you don't care much about the Lisp Machine era. Most
> > people are running now Windows, some kind o Unix
> > or Linux on their computers.
> 
> I'm not sure what you mean by that.  I just mean the lisp machines had
> a much nicer development/debugging environment than (e.g.) Franz even
> quite a few years later.

How do you know?

>  Maybe the current stuff is even better than
> the old lispm stuff (is it?  I'm interested to know and I'll trust
> your opinion on that).

For current stuff, both Allegro CL and LispWorks are perfectly
fine.

>  But I see that as just more improved creature
> comforts, like making cars with better and better upholstery and
> stereos, while keeping the drivetrain about the same as before.  The
> interesting stuff happening in languages seems to be with typed
> functional languages.

If you are a research type maybe. If you are developing software,
no. In practical terms Allegro CL and LispWorks are a decade
ahead of most of your functional languages.

You don't need to wait for Lisp Machines to come back, to develop
Lisp-based software now. If you always wait for some abstract
ideal, you won't get much done.

> 
> > LispWorks enables for example the creation of
> > 'true' applications. For example for InspiteDate
> > you would hardly guess that it is written in Lisp.
> > The app comes in similar versions for Windows and Mac OS X.
> > Here is video explaining InspireData (Quicktime):
> > http://www.inspiration.com/quicktours/index.cfm?fuseaction=inspiredata_QT
> 
> I don't run Windows or OS X, and don't have the plugins to watch a
> quicktime video, but yeah, it's cool if there's now good packaging
> technology for Lisp programs.  I've done one Python program with real
> Windows packaging (used InnoSetup, configured by a co-worker) and I
> agree that it makes the results look more serious.  

The video would have shown you the user interface, not the packaging.

-- 
http://lispm.dyndns.org
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xfy2qtlny.fsf@ruckus.brouhaha.com>
Rainer Joswig <······@lisp.de> writes:
> > I'm not sure what you mean by that.  I just mean the lisp machines had
> > a much nicer development/debugging environment than (e.g.) Franz even
> > quite a few years later.
> 
> How do you know?

I wouldn't say that I personally used either one to any extent, but I
certainly saw other people using them (I worked at MIT and was a
student at Berkeley).  Of course that was still many years ago and I
can perfectly well understand that the stuff today is probably much
better.  Also, even though I didn't use the lisp machine myself, I
have some knowledge of its software from reading the orange manual and
writing a subset of Flavors as an emacs extension (in C).

> If you are a research type maybe. If you are developing software,
> no. In practical terms Allegro CL and LispWorks are a decade
> ahead from most of your functional languages.

Hmm, maybe there is something to that, certainly as far as the
development tools are concerned.  If I were a Windows user I'd
probably be trying out F#.  But mainly I'm interested in finding more
expressive ways to communicate my intentions to the computer, which is
what PL research is about (though I'm not a researcher, I guess my
interests are getting close to what the researchers are doing).  And
by that criterion, Lisp is perhaps one of the best of the Blubs, but
it's still a Blub.

> The video would have shown you the user interface, not the packaging.

Well all these languages have reasonable GUI toolkits, I hope.  So
the apps would look just like they were written in some other
language.

Actually, the Python thing I did used tkinter, which uses the awful
ugly Tk widgets instead of native Windows widgets, but that came out
in my favor since it was a factory floor application and the
"industrial" tk appearance kept the app from looking like it was done
in visual basic.  It came across as being more serious.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-CD645E.12372611082007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Rainer Joswig <······@lisp.de> writes:
> > > I'm not sure what you mean by that.  I just mean the lisp machines had
> > > a much nicer development/debugging environment than (e.g.) Franz even
> > > quite a few years later.
> > 
> > How do you know?
> 
> I wouldn't say that I personally used either one to any extent, but I
> certainly saw other people using them (I worked at MIT and was a
> student at Berkeley).  Of course that was still many years ago and I
> can perfectly well understand that the stuff today is probably much
> better.  Also, even though I didn't use the lisp machine myself, I
> have some knowledge of its software from reading the orange manual and
> writing a subset of Flavors as an emacs extension (in C).

If you are into 'bondage & discipline' (see Hacker's dictionary,
http://www.catb.org/jargon/html/B/bondage-and-discipline-language.html )
languages and systems, you would have hated them. Even more
than current systems.

These describe the philosophy behind the LispM:
http://lispm.dyndns.org/genera-concepts/genera.html
http://lispm.dyndns.org/symbolics-5.html

Above is really the anti-christ for SFPL people. ;-)

> > If you are a research type maybe. If you are developing software,
> > no. In practical terms Allegro CL and LispWorks are a decade
> > ahead from most of your functional languages.
> 
> Hmm, maybe there is something to that, certainly as far as the
> development tools are concerned.  If I were a Windows user I'd
> probably be trying out F#.  But mainly I'm interested in finding more
> expressive ways to communicate my intentions to the computer, which is
> what PL research is about (though I'm not a researcher, I guess my
> interests are getting close to what the researchers are doing).  And
> by that criterion, Lisp is perhaps one of the best of the Blubs, but
> it's still a Blub.

I also want to use more expressive ways to communicate
my intentions to the computer. Unfortunately type systems
don't help ME much. For me they are heavy ball and chain.
I favor the flexibility and easy integration of different
paradigms, languages and dialects and the ability to
reason about those at runtime.

> > The video would have shown you the user interface, not the packaging.
> 
> Well all these languages have reasonable GUI toolkits, I hope.  So
> they apps would look just like they were written in some other
> language.
> 
> Actually, the Python thing I did used tkinter, which uses the awful
> ugly Tk widgets instead of native Windows widgets, but that came out
> in my favor since it was a factory floor application and the
> "industrial" tk appearance kept the app from looking like it was done
> in visual basic.  It came across as being more serious.

-- 
http://lispm.dyndns.org
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xzm0yjoex.fsf@ruckus.brouhaha.com>
Rainer Joswig <······@lisp.de> writes:
> If you are into 'bondage & discipline' (see Hacker's dictionary,
> http://www.catb.org/jargon/html/B/bondage-and-discipline-language.html )
> languages and systems, you would have hated them. Even more
> than current systems.

Well, back in those days, I was into Lisp.  I'm not sure about B&D but
I think Eric's objection was to bureaucracy (as found in languages like
Java), rather than clean separation of types at compile time.  

> These describe the philosophy behind the LispM:
> http://lispm.dyndns.org/genera-concepts/genera.html
> http://lispm.dyndns.org/symbolics-5.html
> Above is really the anti-christ for SFPL people. ;-)

Right, this is why I say I sort of regret not having been a Lisp
hacker back in those days, since that software was designed when
machines were tiny compared with now.  Today, it would be total
madness.

> I also want to use more expressive ways to communicate
> my intentions to the computer. Unfortunately type systems
> don't help ME much. For me they are heavy ball and chain.
> I favor the flexibility and easy integration of different
> paradigms, languages and dialects and the ability to
> reason about those at runtime.

I wonder if you've looked at Sweeney's article on "the next mainstream
language" that I started a clf thread about a couple weeks ago.  He is
an extremely practical game developer.  Among other things the
language he wants is dependently typed.  If you look at the problems
he faces, doing it in Lisp just sounds crazy.

PPT and PDF versions of same presentation:

  http://www.cs.princeton.edu/~dpw/popl/06/Tim-POPL.ppt
  http://www.st.cs.uni-sb.de/edu/seminare/2005/advanced-fp/docs/sweeny.pdf
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-45E8C1.14123811082007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Rainer Joswig <······@lisp.de> writes:
> > If you are into 'bondage & discipline' (see Hacker's dictionary,
> > http://www.catb.org/jargon/html/B/bondage-and-discipline-language.html )
> > languages and systems, you would have hated them. Even more
> > than current systems.
> 
> Well, back in those days, I was into Lisp.  I'm not sure about B&D but
> I think Eric's objection was to bureaucracy (as found in languages like
> Java), rather than clean separation of types at compile time.  
> 
> > These describe the philosophy behind the LispM:
> > http://lispm.dyndns.org/genera-concepts/genera.html
> > http://lispm.dyndns.org/symbolics-5.html
> > Above is really the anti-christ for SFPL people. ;-)
> 
> Right, this is why I say I sort of regret not having been a Lisp
> hacker back in those days, since that software was designed when
> machines were tiny compared with now.  Today, it would be total
> madness.

They were large for their time, though.
Still it indicates that we have lost something and
that somehow a few things have gone wrong.
Same thing Alan Kay speaks about.

> > I also want to use more expressive ways to communicate
> > my intentions to the computer. Unfortunately type systems
> > don't help ME much. For me they are heavy ball and chain.
> > I favor the flexibility and easy integration of different
> > paradigms, languages and dialects and the ability to
> > reason about those at runtime.
> 
> I wonder if you've looked at Sweeney's article on "the next mainstream
> language" that I started a clf thread about a couple weeks ago.  He is
> an extremely practical game developer.  Among other things the
> language he wants is dependently typed.  If you look at the problems
> he faces, doing it in Lisp just sounds crazy.
> 
> PPT and PDF versions of same presentation:
> 
>   http://www.cs.princeton.edu/~dpw/popl/06/Tim-POPL.ppt
>   http://www.st.cs.uni-sb.de/edu/seminare/2005/advanced-fp/docs/sweeny.pdf

You can compare that with:

The Technology of Jak & Daxter, Stephen White 
http://lispm.dyndns.org/documents/White_Stephen.pdf

-- 
http://lispm.dyndns.org
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186681906.391396.281430@e16g2000pri.googlegroups.com>
On Aug 7, 8:33 pm, Paul Rubin <·············@NOSPAM.invalid> wrote:
> Don Geddis <····@geddis.org> writes:
> > The idea was that there is a large and complex program, the vast majority
> > of which has valid syntax and does something useful, but one tiny corner has
> > an error of some kind (for example, a syntax error or a type error).
>
> Why not fix the error?  

Because the error has nothing to do with what I'm working on now.

> Why so impatient to run the code with a syntax (or type error)?  How
> hard is it to fix, possibly by writing a 1-line stub that signals an
> error?

That one line stub (which isn't one line - it usually has to fake a
return value as well, a value that may require some work if dummy
instances aren't required by the application), does exactly what the
run-time for a dynamically typed system does if you don't define the
stub - it signals an error at runtime.  (Note that being type-safe
didn't protect you from this run-time exception.)

Why are you so eager to write error signalling code whose only purpose
is to get the type system to let you do your work?
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bmlbk15im4hf2@corp.supernews.com>
Andy Freeman wrote:
> On Aug 7, 8:33 pm, Paul Rubin <·············@NOSPAM.invalid> wrote:
>> Don Geddis <····@geddis.org> writes:
>> > The idea was that there is a large and complex program, the vast
>> > majority of which has valid syntax and does something useful, but one
>> > tiny corner has an error of some kind (for example, a syntax error or a
>> > type error).
>>
>> Why not fix the error?
> 
> Because the error has nothing to do with what I'm working on now.

Then comment it out.

> Why are you so eager to write error signalling code whose only purpose
> is to get the type system to let you do your work?

Why are you feeding incorrect code to a compiler?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186683852.015595.66640@i13g2000prf.googlegroups.com>
On Aug 9, 10:48 am, Jon Harrop <····@ffconsultancy.com> wrote:
> Andy Freeman wrote:
> > On Aug 7, 8:33 pm, Paul Rubin <·············@NOSPAM.invalid> wrote:
> >> Don Geddis <····@geddis.org> writes:
> >> > The idea was that there is a large and complex program, the vast
> >> > majority of which has valid syntax and does something useful, but one
> >> > tiny corner has an error of some kind (for example, a syntax error or a
> >> > type error).
>
> >> Why not fix the error?
>
> > Because the error has nothing to do with what I'm working on now.
>
> Then comment it out.

In other words, static typing forces me to write both sides of an
interaction at once, even if I'd like to write just one side while I'm
doing something else.

> > Why are you so eager to write error signalling code whose only purpose
> > is to get the type system to let you do your work?
>
> Why are you feeding incorrect code to a compiler?

It's not incorrect - it's incomplete, and that incompleteness may well
be irrelevant for now.

If I comment out the line, I lose valuable information, namely, that
the incompleteness was relevant.  If a later run-time error occurs, I
get to figure out what actually happened.  Or, the program misbehaves
silently.  Either one requires work to figure out.

If I have to define a stub to signal the error, I had to define said
stub and make it so the type system is happy, but at least I get an
error close to the cause.

If the system throws a run time error, I get the benefit of the stub
without writing it.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bmurskrs8hra2@corp.supernews.com>
Andy Freeman wrote:
> It's not incorrect...

It doesn't work => it is incorrect.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186704302.629564.83220@e9g2000prf.googlegroups.com>
On Aug 9, 1:30 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> Andy Freeman wrote:
> > It's not incorrect...
>
> It doesn't work => it is incorrect.

It works for the inputs of interest.  It also works for the purposes
of development.

-andy
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9ge2j$ln5$1@aioe.org>
Jon Harrop escreveu:
> Andy Freeman wrote:
>> It's not incorrect...
> 
> It doesn't work => it is incorrect.
> 
Some posts ago you've complained on forced definitions about static 
typing, now you "decided" on this definition?
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bqo79k10spm39@corp.supernews.com>
Cesar Rabak wrote:
> Jon Harrop escreveu:
>> Andy Freeman wrote:
>>> It's not incorrect...
>> 
>> It doesn't work => it is incorrect.
>> 
> Some posts ago you've complained on forced definitions about static
> typing, now you "decided" on this definition?

The above is independent of static typing.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9kvef$ks$1@aioe.org>
Jon Harrop escreveu:
> Cesar Rabak wrote:
>> Jon Harrop escreveu:
>>> Andy Freeman wrote:
>>>> It's not incorrect...
>>> It doesn't work => it is incorrect.
>>>
>> Some posts ago you've complained on forced definitions about static
>> typing, now you "decided" on this definition?
> 
> The above is independent of static typing.
> 
Non sequitur to evade an observation about your behavior?

Or you agree your definition is bogus /independently/ of static typing?

Come on, Jon! We're attempting to keep a rational and educated dialog or 
this is really about a very focused and disguised marketing effort?
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bscpoqtqbrpef@corp.supernews.com>
Cesar Rabak wrote:
> Come on, Jon! We're attempting to keep a rational and educated dialog or
> this is really about a very focused and disguised marketing effort?

I don't see what the confusion is about "if a function does not work then it
is not correct".

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9lopt$22n$1@aioe.org>
Jon Harrop escreveu:
> Cesar Rabak wrote:
>> Come on, Jon! We're attempting to keep a rational and educated dialog or
>> this is really about a very focused and disguised marketing effort?
> 
> I don't see what the confusion is about "if a function does not work then it
> is not correct".
> 
Jon,

The confusion is being created by you by not keeping enough context. In 
order to sustain an intellectually useful discussion we have to abide by 
some simpler rules.
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1187020152.926761.145880@z24g2000prh.googlegroups.com>
On Aug 11, 2:59 pm, Jon Harrop <····@ffconsultancy.com> wrote:
> I don't see what the confusion is about "if a function does not work then it
> is not correct".

The "confusion" is that Harrop hasn't figured out that the code in
question DOES work for all inputs; it does the right thing.

The only problem is that making this code type-safe is a pain.  A
static type system is merely forcing folks to write code that has no
function other than making the type system happy.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <hx7io1vkd0.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Jon Harrop escreveu:
>> Cesar Rabak wrote:
>>> Jon Harrop escreveu:
>>>> Andy Freeman wrote:
>>>>> It's not incorrect...
>>>> It doesn't work => it is incorrect.
>>>>
>>> Some posts ago you've complained on forced definitions about static
>>> typing, now you "decided" on this definition?
>> The above is independent of static typing.
>>
> Non sequitur to evade an observation about your behavior?
>
> Or you agree your definition is bogus /independently/ of static typing?
>
> Come on, Jon! 


> We're attempting to keep a rational and educated dialog

You certainly don't.

Regards -- Markus
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9n7mu$9js$1@aioe.org>
Markus E.L. 2 escreveu:
> Cesar Rabak wrote:
> 
>> Jon Harrop escreveu:
>>> Cesar Rabak wrote:
>>>> Jon Harrop escreveu:
>>>>> Andy Freeman wrote:
>>>>>> It's not incorrect...
>>>>> It doesn't work => it is incorrect.
>>>>>
>>>> Some posts ago you've complained on forced definitions about static
>>>> typing, now you "decided" on this definition?
>>> The above is independent of static typing.
>>>
>> Non sequitur to evade an observation about your behavior?
>>
>> Or you agree your definition is bogus /independently/ of static typing?
>>
>> Come on, Jon! 
> 
> 
>> We're attempting to keep a rational and educated dialog
> 
> You certainly don't.
> 
> Regards -- Markus
Markus, are you addicted to insulting people gratuitously?

Hadn't you written you plonked me?

Are you Jon's associate/partner or attorney? Let him stand for his 
posts, at least!
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <epeji7rgju.fsf@hod.lan.m-e-leypold.de>
Cesar Rabak wrote:

> Markus E.L. 2 escreveu:
>> Cesar Rabak wrote:
>>
>>> Jon Harrop escreveu:
>>>> Cesar Rabak wrote:
>>>>> Jon Harrop escreveu:
>>>>>> Andy Freeman wrote:
>>>>>>> It's not incorrect...
>>>>>> It doesn't work => it is incorrect.
>>>>>>
>>>>> Some posts ago you've complained on forced definitions about static
>>>>> typing, now you "decided" on this definition?
>>>> The above is independent of static typing.
>>>>
>>> Non sequitur to evade an observation about your behavior?
>>>
>>> Or you agree your definition is bogus /independently/ of static typing?
>>>
>>> Come on, Jon!
>>
>>> We're attempting to keep a rational and educated dialog
>> You certainly don't.
>> Regards -- Markus
> Markus are addicted on insulting people gratuitously?
>
> Hadn't you written you plonked me?

You know, plonking is no guarantee of protection for you to get away
without any comment. That would be too easy. It's rather more an
indication that one doesn't talk with you any more, but rather about
you. 

> Are you Jon'n associate/partner or attorney? Let him stand for his
> posts, at least!

Oh-oh. This reply was not about Jon but about you: Basically that
you're not entitled to say "We" in "We're attempting to keep a
rational and educated dialog". You should know, why.

Have a nice day -- Markus
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xwsw2es13.fsf@ruckus.brouhaha.com>
Andy Freeman <······@earthlink.net> writes:
> > Why so impatient to run the code with a syntax (or type error)?  How
> > hard is it to fix, possibly by writing a 1-line stub that signals an
> > error?
> That one line stub (which isn't one line - it usually has to fake a
> return value as well, a value that may require some work if dummy
> instances aren't required by the application),

Every Haskell function must return a value.  The one line stub would
normally be something like (error "not yet implemented").  error is a
polymorphic builtin with type String -> a, where the "a" means a
completely arbitrary type; when evaluated, it raises an exception with
the string that you gave it.  The arbitrary type means it never gets
a type error.
From: Duane Rettig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <o0sl6udpc5.fsf@gemini.franz.com>
Don Geddis <···@geddis.org> writes:

> Jon Harrop <···@ffconsultancy.com> wrote on Wed, 08 Aug 2007:
>> Out of curiosity, if your favorite dynamic language was extended to allow
>> lexical and parse errors in "code that can never be run", would you be
>> ecstatic because it was suddenly even more powerful and expressive,
>> allowing an even wider variety of non-sensical jibberish to be fed into the
>> compiler?
>
> That particular example was for the sake of argument, and no, it isn't
> especially useful.
>
> But the general design idea of a language trying to do its best with whatever
> the programmer gives it, and especially one that allows execution of
> error-free paths even if other non-executed paths might fail, and for that
> matter one with a debugger that allows you to repair a program in the running
> image instead of only by making source code modifications followed by
> recompiling; yes, all those things I find quite valuable.

I've not yet seen in this thread any analogies to the link-editing
model, in which in a complete program all externals must be resolved
and are thus bound at link-time.  However, weak references and dynamic
references allow a program to be packaged as incomplete. A parallel
argument to a static-typing purist's argument might be that nobody
would ever want to run a program that is incomplete, which might cause
an exception for an undefined external (why not just eliminate the
reference to the external instead, and keep the program pure?)  Of
course, this then would preclude the concept of dlopen/dlsym and
the C "funcall" of functions found thereby.  Looks like even C isn't
as static as it seems... (C is of course static at the function level,
but the advent of dynamic-shared-libraries allow it to be dynamic at
the module/DSL level, whereas Lisp tends to be modular at the
function-al level).

-- 
Duane Rettig    ·····@franz.com    Franz Inc.  http://www.franz.com/
555 12th St., Suite 1450               http://www.555citycenter.com/
Oakland, Ca. 94607        Phone: (510) 452-2000; Fax: (510) 452-0182   
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186590246.556554.58980@d55g2000hsg.googlegroups.com>
On 8 Aug., 17:21, Duane Rettig <·····@franz.com> wrote:
> Don Geddis <····@geddis.org> writes:
> > Jon Harrop <····@ffconsultancy.com> wrote on Wed, 08 Aug 2007:
> >> Out of curiosity, if your favorite dynamic language was extended to allow
> >> lexical and parse errors in "code that can never be run", would you be
> >> ecstatic because it was suddenly even more powerful and expressive,
> >> allowing an even wider variety of non-sensical jibberish to be fed into the
> >> compiler?
>
> > That particular example was for the sake of argument, and no, it isn't
> > especially useful.
>
> > But the general design idea of a language trying to do its best with whatever
> > the programmer gives it, and especially one that allows execution of
> > error-free paths even if other non-executed paths might fail, and for that
> > matter one with a debugger that allows you to repair a program in the running
> > image instead of only by making source code modifications followed by
> > recompiling; yes, all those things I find quite valuable.
>
> I've not yet seen in this thread any analogies to the link-editing
> model, in which in a complete program all externals must be resolved
> and are thus bound at link-time.  However, weak references and dynamic
> references allow a program to be packaged as incomplete. A parallel
> argument to a static-typing purist's argument might be that nobody
> would ever want to run a program that is incomplete, which might cause
> an exception for an undefined external (why not just eliminate the
> reference to the external instead, and keep the program pure?)  Of
> course, this then would preclude the concept of dlopen/dlsym and
> the C "funcall" of functions found thereby.  Looks like even C isn't
> as static as it seems... (C is of course static at the function level,
> but the advent of dynamic-shared-libraries allow it to be dynamic at
> the module/DSL level, whereas Lisp tends to be modular at the
> function-al level).

I don't know if stuff like that has been done in one of the newer
strongly typed FPLs.
Yet I see no principal reason why this should not be possible. After
all, in practice, one expects dynamically loaded components to
implement one or more interfaces/contracts or whatever you like to
call it. Thus, the contract could be checked at dlload time and then
the program could proceed as usual.
From: Daniel C. Wang
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46B9F025.9010506@gmail.com>
Ingo Menger wrote:
{stuff deleted}
> I don't know if stuff like that has been done in one of the newer
> strongly typed FPLs.
> Yet I see no principal reason why this should not be possible. After
> all, in practice, one expects dynamically loaded components to
> implement one ore more interfaces/contracts or whatever you like to
> call it. Thus, the contract could be checked at dlload time and then
> the program could proceed as usual.

Class loading in the JVM/CLR works this way: when you dynamically load 
an object/class you can introspect on its type and downcast in a safe way.
From: Daniel C. Wang
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <MZWdnZDktIGzbyTbnZ2dnUVZ_hisnZ2d@comcast.com>
Erik Meijer and Peter Drayton. Static Typing Where Possible, Dynamic 
Typing When Needed: The End of the Cold War Between Programming 
Languages. Proc. OOPSLA'04 Workshop on Revival of Dynamic Languages.

http://research.microsoft.com/~emeijer/Papers/RDL04Meijer.pdf

I need to put this on my bookmarks link, so I can avoid having to drag 
this out next time this debate gets started again.
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7Fmui.21354$j7.383780@news.indigo.ie>
Daniel C. Wang wrote:

> Erik Meijer and Peter Drayton. Static Typing Where Possible, Dynamic
> Typing When Needed: The End of the Cold War Between Programming
> Languages. Proc. OOPSLA'04 Workshop on Revival of Dynamic Languages.
> 
> http://research.microsoft.com/~emeijer/Papers/RDL04Meijer.pdf
> 
> I need to put this on my bookmarks link, so I can avoid having to drag
> this out next time this debates gets started again.

Also possibly interesting, Siek & Taha "Gradual Typing":
http://www.cs.colorado.edu/~siek/pubs/pubs/2006/siek06:_gradual.pdf

(Yes, they are wrong about Common Lisp specifically in that paper, in
that (at the time) they apparently believed that common lisp type
declarations were only about eliding dynamic type checking for
performance, when more recent compilers (CMUCL &c.) significantly
change that -  But that does not mean their effort at formalisation is
of no interest to lispers).
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-A6AC12.19081108082007@news-europe.giganews.com>
In article <·····················@news.indigo.ie>,
 David Golden <············@oceanfree.net> wrote:

> Daniel C. Wang wrote:
> 
> > Erik Meijer and Peter Drayton. Static Typing Where Possible, Dynamic
> > Typing When Needed: The End of the Cold War Between Programming
> > Languages. Proc. OOPSLA'04 Workshop on Revival of Dynamic Languages.
> > 
> > http://research.microsoft.com/~emeijer/Papers/RDL04Meijer.pdf
> > 
> > I need to put this on my bookmarks link, so I can avoid having to drag
> > this out next time this debates gets started again.
> 
> Also possibly interesting, Siek & Taha "Gradual Typing":
> http://www.cs.colorado.edu/~siek/pubs/pubs/2006/siek06:_gradual.pdf
> 
> (Yes, they are wrong about Common Lisp specifically in that paper, in
> that (at the time) they apparently believed that common lisp type
> declarations were only about eliding dynamic type checking for
> performance, when more recent compilers (CMUCL &c.) significantly
> change that -  But that does not mean their effort at formalisation is
> of no interest to lispers).


"Many dynamically 
typed languages have optional type annotations that are used to im- 
prove run-time performance but not to increase the amount of static 
checking. Common LISP [23] and Dylan [12, 37] are examples of 
such languages."

I thought Dylan does type check at compile time where possible
and that types in Dylan were both about performance and
increasing static checking.

Am I wrong???

-- 
http://lispm.dyndns.org
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <uxnui.21356$j7.383750@news.indigo.ie>
Rainer Joswig wrote:

> I thought Dylan does type check at compile time where possible
> and that types in Dylan were both about performance and
> increasing static checking.
> 
> Am I wrong???
> 

Well, I never really got into Dylan (unpleasant syntax!), I just said
they were wrong (or out-of-date) about Lisp, they might well be wrong
about Dylan too...

In their later (closely-related) paper, their related work  summary
changes just enough to mask:
http://ece.colorado.edu/~siek/gradual-obj.pdf

"""
8 . Related Work
/Type Annotations for Dynamic Languages/  Several dynamic programming
lan-guages allow explicit type annotations, such as Common LISP [33],
Dylan [16, 45], Cecil [10], Boo [13], extensions to Visual Basic.NET
and C# proposed by Meijer and Drayton [36], the Bigloo [8, 44] dialect
of Scheme [34], and the Strongtalk dialect of Smalltalk [6, 7]. In
these languages, adding type annotations brings some static checking
and/or improves performance, but the languages do not make the
guarantee that annotating all parameters in the program prevents
all type errors and type exceptions at run-time. This paper formalizes a
type system that provides this stronger guarantee.
"""


(not that lispers consider all kinds of run-time type errors fatal to a
running program anyway, what with correctable type error conditions
with store-value restarts...)
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <uk643sk7g0.fsf@hod.lan.m-e-leypold.de>
Ingo Menger wrote:

> I don't share this impression. When it is stated here that "I do unit
> tests, they'll catch type errors like a type system does" it is
> obvious that not even the fundamental epistemological difference

I will have to put 'epistemological' from the list of words I
understand to the list of words I actually use. It actually hits the
nail on its head here,

> between "test" and "proof" is clear.

and I fear that is much the point why this discussion actually happens
(or should I say "ferments" :-).

Regards -- Markus
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186508909.907532.239520@z24g2000prh.googlegroups.com>
On Aug 4, 4:46 am, Ingo Menger <···········@consultant.com> wrote:
> On 4 Aug., 06:06, Don Geddis <····@geddis.org> wrote:
> > Presumably, you mean to include dynamically typed languages like Common Lisp
> > under your "untyped" category, and compile-time statically typed languages
> > as the precursors to these "even more advanced type systems".
>
> Sure.

It would be nice if the folks arguing that some statically typed
systems are better than some dynamically typed systems would bother to
use correct terminology.

Lisp is NOT untyped.  It is dynamically typed.  If you don't
understand the difference or can't be bothered to make the
distinction, is it really likely that you understand type systems
enough to provide substantive comments?

Note that some static type systems are better than some dynamic type
systems for certain applications.  It's also true that some dynamic
type systems are better than some static type systems for certain
applications.  Neither of those points are all that interesting.

The interesting claim would be that a certain static type system is
better than any dynamic type system for certain applications.
However, to make that argument would require technical chops that the
static type advocates in this discussion have yet to demonstrate.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bhudg9g4jdjf4@corp.supernews.com>
Andy Freeman wrote:
> It would be nice if the folks arguing that some statically typed
> systems are better than some dynamically typed systems would bother to
> use correct terminology...

Pot, kettle.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <8j1weiackf.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:

> Surely you can see how insulting your comments are to the highly skilled
> professional programmers 
<...>

> [...] mature programmers [...]

> Perhaps you'll begin to see that [...]

Oh my. 
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f9344s$909$1@online.de>
Pascal Costanza schrieb:
> All static type systems have to reject programs that may succeed at 
> runtime. The assumption of defenders of static type systems is that the 
> kinds of programs that may succeed but are nevertheless rejected are 
> marginal and uninteresting. The assumption of defenders of dynamic type 
> systems is that they are not.
> 
> This is independent of what the actual semantics are.

Agreed.

> An important case is when programming languages provide reflection at 
> runtime, including both introspection and intercession. Especially the 
> latter makes it at least extremely difficult to statically type-check 
> programs written in such languages.

Assuming "intercession" is the act where the program changes functions 
(say, change the signature of a function, or the number of fields in a 
type).

 > There is an important class of
> programs whose behaviors need to be updated and changed at runtime, and 
> such programs rely on reflection at runtime. I am not aware of any 
> static type system that is able to check such programs before 
> deployment, and I am convinced that they will never be able to do so, so 
> I don't bother.

Oh, but that's entirely possible. All you have to do is to forbid 
changing or removing existing type and function definitions. I.e. to 
fully identify an entity, you'd have to add its version number; old code 
continues to use the old definitions.
Old definitions could be garbage collected as soon as the last data 
object referencing them becomes garbage.

You'd want to upgrade persistent data at some time. For that, you'd need 
code to map old-type data structures to new-type ones (and, if the data 
is modifiable, vice versa). This could be a challenge - on the other 
hand, this kind of migration is always a challenge.
Complementarily, you could write your new code so that it can deal with 
old and new data alike.

Whatever the details, the entire scheme makes sure that definitions are 
always added, never removed or changed, so if the type system can handle 
additive changes, the whole thing typechecks. Even statically - if you 
accept that "statically" means "at module load time".

Regards,
Jo
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <bw1weibsty.fsf@hod.lan.m-e-leypold.de>
Pascal Costanza wrote:

> Ingo Menger wrote:
>> On 2 Aug., 10:50, Pascal Costanza <····@p-cos.net> wrote:
>>> Ingo Menger wrote:
>>>> On 2 Aug., 10:12, Pascal Costanza <····@p-cos.net> wrote:
>>>>> Ingo Menger wrote:
>>>>>> One can understand type inference as a feature that sorts out programs
>>>>>> that can't possibly run without errors and leaves those that have
>>>>>> errors most probably.
>>>>> No, a static type system that enforces type soundness always has to
>>>>> reject certain programs that may succeed at runtime.
>>>> This is a question of semantics.
>>>> One can define a semantic, where nonsensical actions like passing the
>>>> empty list to a function that extracts the second element from a
>>>> tuple, would  result in some more or less meaningful value.
>>>> An example of a language with such a forgiving semantic is perl.
>>>> Certain nonsensical constructs produce the value undef and the program
>>>> can go on with the undef value.
>>> No, it's more than that.
>> Yes, it's something like that.
>> At least it makes no sense to speak of "programs" that "succeed" at
>> runtime without referring to a semantic.
>
> All static type systems have to reject programs that may succeed at
> runtime. The assumption of defenders of static type systems is that

Care to give an example?  I'm having a bit of trouble to understand
what "succeed" might mean regarding programs that have been
"reject(ed)" on the ground that they weren't well typed, because there
is no semantics defined for the rejected programs. 

Care to rephrase your assertion in terms that are meaningful? What
does succeed mean there?

> the kinds of programs that may succeed but are nevertheless rejected
> are marginal and uninteresting. The assumption of defenders of dynamic
> type systems is that they are not.
>
> This is independent of what the actual semantics are.

No. To me the word succeed only makes sense as "produces the effects
and outputs described by the language semantics". But a "badly typed"
program is not program in the language: It's just text, the language
semantics doesn't say what it does. 

To succeed would it suffice to compile it to a NOOP? Or to "hello
world"? Undoubtedly that feels wrong, but why? How do you distinguish
legal compiler output from bad one, legal behaviour of the program
produced from one the compiler should not have implemented, if you
don't have a semantics?

Make a suggestion.

> An important case is when programming languages provide reflection at
> runtime, including both introspection and intercession. Especially the
> latter makes it at least extremely difficult to statically type-check
> programs written in such languages. There is an important class of
> programs whose behaviors need to be updated and changed at runtime,
> and such programs rely on reflection at runtime. I am not aware of any
> static type system that is able to check such programs before
> deployment, and I am convinced that they will never be able to do so,
> so I don't bother.

I'm a bit mystified here: Java has a static type system. Java has
reflection. Perhaps I've misunderstood something.

> That's my personal conviction and I don't want to argue about personal
> convictions. 

As long as you voice them in public and they are at odds with what
other people perceive as well known technical or scientific facts (the
question what succeed means, in example) you'll probably have to
discuss them: Or live with other people rejecting them vocally with
you staying silent. Certainly (when I've time) I won't let them stand
like that.

> I am fine if you disagree that such programs are
> important, 

No. The problem runs deeper. What you say about succeed sounds nice
-- as far as natural language goes -- but does not make sense as a
(remotely) scientific statement on a programming language or compiler:
The word "succeed" is ill defined in what you wrote.

> as long as you are aware that this is mainly a matter of
> personal opinion. 

It isn't, not regarding your propositions on "succeeding". The rest of
your speech therefore makes only limited sense.

> I only strongly disagree with defenders of static
> type systems as soon as static typing is portrayed as the objectively
> only reasonable way to go, because that is stupid.

I doubt that many would put it like this. Most would agree, that a
safe dynamic type system is also a nice thing to have (after all it's
kind of a static type system with just one large union type :-), as
long as one doesn't allow unsafe typing (like in C). Most have reaped
benefits from static typing and therefore prefer that. You can't take
away their experience (and opinion) either.

> I just want to inform you about the fact that there are people who
> care about the programs that static type systems cannot check and who
> understand what the underlying issues are.

Ah, well: You didn't make a very clear statement yet, what programs
are in this class. Since you stipulate that it exists, you'll hardly
be able to avoid demands to somehow show that this class is not empty
or (as you already assume we would argue) that there are really
interesting (non pathological) programs in it.

Regards -- Markus
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87ejih1fnl.fsf@geddis.org>
·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) wrote on Sun, 05 Aug 2007:
> Pascal Costanza wrote:
>> All static type systems have to reject programs that may succeed at
>> runtime.
>
> Care to give an example?  I'm having a bit of trouble to understand what
> "succeed" might mean regarding programs that have been "reject(ed)" on the
> ground that they weren't well type, because there is no semantics defined
> for the rejected programs.

You're missing the middle ground in any inference engine.  A compile-time
static type checker, when examining a piece of code, will conclude one of
THREE things, not just two:
1. the source code WILL generate a runtime type error;
2. the source code WILL NOT generate a runtime type error;
3. the static type checker cannot determine whether or not the source code
   will generate a runtime type error.

Languages with static type checkers refuse to compile code in category 1.
(We could even argue the utility of this case, but let's leave it for now.)
They do compile, and allow you to execute, code in category 2.

But what about category 3?  The code cannot be PROVEN type safe; but at the
same time it cannot be proven type UNSAFE.  Of course, most static type
languages reject these programs as well.

But that doesn't mean that they necessarily have no semantics, or will
generate a runtime type error.

You could generate your own examples, but let me help you get started:

        p = smallest_prime_factor_of(...nnnn...);
        if p < 99999999
                then print "it's a small one!"
                else print sqrt("oh no, a string!")

The "else" clause passes a string to a function which should only take
numbers, so that ought to be a type error.

Except that, for many values of ...nnnn...., the else clause is never
reached at runtime, so a runtime error never occurs.

But no static type system will figure that out.

(Similarly, Pascal already gave another example earlier, with code that
contained
        (eval (read))
The code was perfectly well defined in Common Lisp, but that single line
basically invalidates any conclusions that any static type checker could make
about any part of the program.  So a static type checker would be unable to
prove that the program is type safe.  This does NOT mean that the program
generates a runtime type error, however, nor that the semantics are in any
way undefined.)

> To me the word succeed only makes sense as "produces the effects and
> outputs described by the language semantics". But a "badly typed" program
> is not program in the language: It's just text, the language semantics
> doesn't say what it does.

Don't you think that a type error, localized to unreachable code, only
needs contaminate that local area?  Why must the unreachable code necessarily
contaminate the semantics of the entire program?

Obviously in this case, "succeed" means that the program obeys the language
semantics for the code that actually gets executed.

And if you don't like the "unreachable code" example, you can always try
Pascal's (eval (read)) example instead.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
If I could change one thing about the Bible, I'd make more things abominations.
	-- Deep Thoughts, by Jack Handey [1999]
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <k6fy2x2f99.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:

> ·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) wrote on Sun, 05 Aug 2007:
>> Pascal Costanza wrote:
>>> All static type systems have to reject programs that may succeed at
>>> runtime.
>>
>> Care to give an example?  I'm having a bit of trouble to understand what
>> "succeed" might mean regarding programs that have been "reject(ed)" on the
>> ground that they weren't well type, because there is no semantics defined
>> for the rejected programs.
>
> You're missing the middle ground in any inference engine.  A compile-time
> static type checker, when examining a piece of code, will conclude one of
> THREE things, not just two:
> 1. the source code WILL generate a runtime type error;
> 2. the source code WILL NOT generate a runtime type error;
> 3. the static type checker cannot determine whether or not the source code
>    will generate a runtime type error.

Static type checkers _might_ flag places where runtime error might
potentially occur as a side benefit. But they only do that after the
program has been typed successfully. Badly typed programs have
no semantics. I repeat: They have no semantics. So the statement "may
succeed at runtime" makes no sense.

I suspect you don't know what you're talking about: We're talking
about static typing here, not a static checker or a data flow analysis
tool.

Regards -- Markus
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <877io8zke8.fsf@geddis.org>
·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) wrote on Mon, 06 Aug 2007:
> Badly typed typed programs have no semantics. I repeat: They have no
> semantics. So the statement "may succeed at runtime" makes no sense.

You continue to confuse: (1) what can be proven by your static type checker;
with (2) what kinds of programs have semantics.

Static type checkers are not the last word on the semantics of computer
programs.  They are limited tools, and there is plenty of code that we can
easily assign semantics to, as human beings, which the static type algorithms
are unable to verify.

(As another aside: perhaps the confusion is what "badly typed" means.
In safe dynamically typed languages like Lisp, there is only a single union
type in your sense, and every function "succeeds" no matter what the input.
The "success" may be to generate a runtime type exception, but that's all
well within the semantics of the language.  Getting an array index out of
bounds; calling addition with a string; none of these things cause the program
to "have no semantics".)

> I suspect you don't know what you're talking about: We're talking about
> static typing here, not a static checker or a data flow analysis tool.

I gave you two examples, which I noticed you deleted without comment.  I
don't know how else I can help you.  I've provided both general hints
(inference is either limited or undecidable), and specific hints (actual
source code which has semantics but is not verifiable with static type
checkers).

You seem not willing to think about either, but instead just retreat to "if
I disagree with you, I must not know what I'm talking about."  It apparently
never even crosses your mind that you may be the one in error.

I'll try one last time, building on Pascal's example.  Please try to actually
think about this example before responding:

        define F(x) = x + 1;
        define G() = F("hello");
        print "Type an expression to be evaluated:"
        expr = read();
        eval(expr);
        G();

This program can be given semantics.  It certainly appears that F is a function
which takes only numbers as arguments.  It appears that G calls F with a
string instead of a number.  And the program eventually calls G.

However, it is not the case that this program will necessarily cause a runtime
type exception.  Before G is called, an arbitrary expression is read from the
user, and then evaluated.  That expression could well be a redefinition of G,
for example
        define G() = F(3);

No system can GUARANTEE that the program I gave above WILL result in a runtime
type error.  Neither can it prove that the program will not result in a runtime
type error.

The question is what to do about such code.  Fans of static typing basically
respond: "if my tool is unable to prove your code type-safe, then you will be
prohibited from running it."

That restriction assumes that this flawed tool of a static type checker is
necessarily superior to the desires of the experienced programmer.  Some of
us (generally the dynamic typing fans) think that the programmers should be
in charge of the tools, not vice versa.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
To me, boxing is like ballet, except there's no music, no choreography, and the
dancers hit each other.  -- Deep Thoughts, by Jack Handey [1999]
From: Rayiner Hashem
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186424757.282145.180300@22g2000hsm.googlegroups.com>
> You continue to confuse: (1) what can be proven by your static type checker;
> with (2) what kinds of programs have semantics.

The problem is that the static-typing folks are using an intentionally
obtuse meaning of "semantics". The type model constructed in a type
system defines a semantics for the program. Thus, technically, any
program that passes the type checker is semantically correct, and any
program that fails it is semantically incorrect.

However, nobody is ultimately interested in the semantics of the
program as defined by the type model, or even the semantics of the
program as defined by the project specification. What people are
interested in is the semantics of the program as defined by the
problem domain. Ie: program that causes a plane to try to touch-down
at 500 mph is wrong even if both the type model and the specification
say it's right. At the same time, a program can display a correct
behavior with respect to the problem domain even if its behavior with
respect to the type system (or the specification) is incorrect.
From: Kent M Pitman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ubqdkggwl.fsf@nhplace.com>
[ comp.lang.lisp only; http://www.nhplace.com/kent/PFAQ/cross-posting.html ]

Rayiner Hashem <·······@gmail.com> writes:

> > You continue to confuse: (1) what can be proven by your static type checker;
> > with (2) what kinds of programs have semantics.
> 
> The problem is that the static-typing folks are using an
> intentionally obtuse meaning of "semantics".  The type model
> constructed in a type system defines a semantics for the
> program. Thus, technically, any program that passes the type 
> checker is semantically correct, and any program that fails 
> it is semantically incorrect.

 semantics 
  ... 3a: the meaning or relationship of meanings of a sign or set of signs;
          especially: connotative meaning
       b: the language used (as in advertising or political propaganda)
          to achieve a desired effect on an audience especially through the
          use of words with novel or dual meanings
 http://mw1.merriam-webster.com/dictionary/semantics

Given this definition, I don't know whether we want one of those or not. ;)

But seriously, I don't mind someone talking about "denotational semantics"
or something like that in a more restricted way because there is a modifier
attached that implicitly says, through its use, "there might be other kinds
of semantics if other modifiers are used".  But when one lays claim to the
word as a whole, that's a major deal to me.

Markus E.L. 2 wrote:

> Badly typed typed programs have no semantics.

Machines still find something to execute in what appears to me a
relatively deterministic way.  And Lisp programmers are able to
predict that behavior usefully, even to plan on it well enough that
they write many useful programs.

I thought the entire notion of "science" was about predictive models,
and I thought that when someone could usefully predict a behavior
using one technique and not with another, that science tended to 
discard the less predictive theory.  (That's not to say every theory
has to cover every situation, but I don't see a qualifying "that I 
understand" at the end of Markus's sentence quoted above.)

I'd completely understand a remark like "our theory doesn't cover that
sense of meaning", but I personally have a confusion about suggesting
that there is "no meaning" which appears to say that the theory which
can find no meaning nevertheless intends to cover that case.
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186482246.499632.263770@22g2000hsm.googlegroups.com>
On 6 Aug., 18:40, Don Geddis <····@geddis.org> wrote:
> ·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) wrote on Mon, 06 Aug 2007:

> I'll try one last time, building on Pascal's example.  Please try to actually
> think about this example before responding:
>
>         define F(x) = x + 1;
>         define G() = F("hello");
>         print "Type an expression to be evaluated:"
>         expr = read();
>         eval(expr);
>         G();
>
> This program can be given semantics.  It certainly appears that F is a function
> which takes only numbers as arguments.  It appears that G calls F with a
> string instead of a number.  And the program eventually calls G.
>
> However, it is not the case that this program will necessarily cause a runtime
> type exception.  Before G is called, an arbitrary expression is read from the
> user, and then evaluated.  That expression could well be a redefinition of G,
> for example
>         define G() = F(3);
>
> No system can GUARANTEE that the program I gave above WILL result in a runtime
> type error.  Neither can it prove that the program will not result in a runtime
> type error.

This applies not only to systems, but also to humans. Since this thing
contains (eval (read)), nobody is able to say what this program will
do at runtime. We can only say on the meta-level, that this is yet
another, somewhat funny LISP interpreter.
That's exactly the point: One cannot reason *now* about a program that
will be typed in *later*.
Therefore, this example is worthless when the discussion is about
program verification. That a static analyzer for language A cannot
analyse the program in language B that is going to be executed by an
interpreter for B written in language A, is trivial. But this holds
true even when A and B happen to be the same languages.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <y1fy2v4ctg.fsf@hod.lan.m-e-leypold.de>
Ingo Menger wrote:

> On 6 Aug., 18:40, Don Geddis <····@geddis.org> wrote:
>> ·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) wrote on Mon, 06 Aug 2007:
>
>> I'll try one last time, building on Pascal's example.  Please try to actually
>> think about this example before responding:
>>
>>         define F(x) = x + 1;
>>         define G() = F("hello");
>>         print "Type an expression to be evaluated:"
>>         expr = read();
>>         eval(expr);
>>         G();
>>
>> This program can be given semantics.  It certainly appears that F is a function
>> which takes only numbers as arguments.  It appears that G calls F with a
>> string instead of a number.  And the program eventually calls G.
>>
>> However, it is not the case that this program will necessarily cause a runtime
>> type exception.  Before G is called, an arbitrary expression is read from the
>> user, and then evaluated.  That expression could well be a redefinition of G,
>> for example
>>         define G() = F(3);
>>
>> No system can GUARANTEE that the program I gave above WILL result in a runtime
>> type error.  Neither can it prove that the program will not result in a runtime
>> type error.
>
> This applies not only to systems, but also to humans. Since this thing
> contains (eval (read)), nobody is able to say what this program will
> do at runtime. We can only say on the meta-level, that this is yet
> another, somewhat funny LISP interpreter.
> That's exactly the point: One cannot reason *now* about a program that
> will be typed in *later*.
> Therefore, this example is worthless when the discussion is about
> program verification. 

The discussion in this subthread AFAIR was about running badly typed
programs anyway. I'd agree with Pascal and Don that there are programs
(with well defined semantics) in dynamically typed languages that have
no corresponding counterparts in a given statically typed
language. But insisting that a badly typed program text written in the
syntax of a statically typed language "has meaning" or "is meaningful"
is just plain stupid: Meaning is assigned by the language semantics
and -- in statically typed languages -- that is only defined on well
defined programs. Of course we might have another dynamically typed
language with accidentally the same syntax whose compiler/interpreter
just accepts the program text in question. But this is another language.

Of course one could extend the semantics of a statically typed
language to cover input that does not type-check.

One would then have a language L1 which is for all intents and
purposes the statically typed language one had before and L2 which
extends L1 and also accepts programs that were rejected by L1 at the
cost of run time type errors that might occur during execution of
these programs. An interesting concept: L2 is basically a dynamically
typed language that has a statically typable language embedded.

But: (1) I don't think the extension will be easy. (2) It will perhaps
not enable revolutionary new ways of programming. (3) Deploying it in
the field would require to integrate a compiler in the L2 language
runtime and (4) I don't see what it really is good for, EXCEPT as a
theoretical exercise (as which it is interesting because of the
relationship to automatic program verification).

And finally: That was not the way Rayiner and Pascal formulated their
concern. They insisted that program texts with static type errors have
"meaning". So we are talking about a statically typed language here
and, as I said, there is no meaning to be assigned to a badly typed
program in this case.

The eval example won't change an iota with respect to this, because it
is written in a language without static typing. True, as I said: This
has no equivalent counterpart in a statically typed language. But to
put it forth as an example that programs that don't type well have a
meaning is plain nonsense: The expression "typing well" has only
meaning in the context of a specific language (and a different one in
each).

Enough said: I hope that is the last I want to say about this
topic. Pascal and his followers cannot be convinced, because they
insist on talking about meaning without grounding it in a formal or
semiformal language semantics, and one cannot refute "opinions" that
are expressed fuzzily enough, because there is not enough hard logic and
commitment in them to refute by contradiction. You only can say "no no
no" and get back "but but but". That leads nowhere -- because
what we're discussing is ill defined.

(And Pascal: I think you have been asked repeatedly what you mean by
assigning a meaning to (statically) badly typed programs: You came
forward with _examples_ in dynamically typed languages but you didn't
give a general explanation of what you mean. You certainly didn't talk
about extending language semantics (though I've been waiting for it
some time). The latter especially has contributed to hardening my
conviction that you've not given your requests much thought and that
what you say is more based on a gut feeling "wouldn't it be nice if X"
where X is something around "we could run it anyway" or "I could
program dynamically typed in statically typed languages" or "every
language would be like Lisp".)

So my friends: This (sub) discussion is closed from my side: The
proponents of "can't we run that anyway" have now had more than a week
to come forward with an explanation what they actually want. Not one
of them came forward with the straightforward formulation "can't we
extend the semantics of a statically typed language in a way that",
instead they are meandering between badly conceived examples, wishful
thinking and some insults.

Booohooo, I say. But this is usenet after all.

Regards -- Markus
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5hrpnoF3lo5j5U1@mid.individual.net>
Markus E.L. 2 wrote:

> The discussion in this subthread AFAIR was about running badly typed
> programs anyway. I'd agree with Pascal and Don that there are programs
> (with well defined semantics) in dynamically typed languages that have
> no corresponding counterparts in a given statically typed
> language. 

...which means that there are things you can express in dynamically 
typed languages which you cannot express in statically typed languages.

> And finally: That was not the way Rayiner and Pascal formulated their
> concern. They insisted that program texts with static type errors have
> "meaning". So we're are talking about a statically typed language here
> and, as I said, there is no meaning to be assigned to a badly typed
> program in this case.

You should read more carefully: The claim was that there are programs 
static type systems will probably never be able to accept but which are 
still meaningful. Such a claim is independent of any statically typed 
language.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bhu2qlao57rd2@corp.supernews.com>
Markus E.L. 2 wrote:
> The discussion in this subthread AFAIR was about running badly typed
> programs anyway. I'd agree with Pascal and Don that there are programs
> (with well defined semantics) in dynamically typed languages that have
> no corresponding counterparts in a given statically typed
> language.

I don't think even that is true.

This whole thread is based upon ill-defined notions of several key terms.
Static typing is the first one. Pascal seems to be using a definition
of "static typing" that requires the checker to prevent uncheckable
programs from being run. Nothing fundamentally requires that, of course, a
static type checker could just emit warnings and continue regardless. Many
Lisp implementations do this, such as the static type checker in SBCL:

* (defun f () (+ 1 "foo"))
; in: LAMBDA NIL
;     +
;
; note: deleting unreachable code
;
; caught WARNING:
;   Asserted type NUMBER conflicts with derived type
;   (VALUES (SIMPLE-ARRAY CHARACTER (3)) &OPTIONAL).
;   See also:
;     The SBCL Manual, Node "Handling of Types"
;
; compilation unit finished
;   caught 1 WARNING condition
;   printed 1 note

F

He then goes on to say that static typing means that uncheckable programs
will not be run, which is his implicit definition of "static typing" rather
than an argument.

As you say, Markus, there is also the question of what "succeed"
and "reject" mean. Was the above program rejected by SBCL?

OCaml emits a warning because it knows the following function definition is
wrong:

# type t = A | B;;
type t = A | B
# let f = function A -> 0;;
Warning P: this pattern-matching is not exhaustive.
Here is an example of a value that is not matched:
B
val f : t -> int = <fun>

Did that "succeed" or was it "rejected"?

To be frank, nobody should care. This is just a religious debate that falls
down to terminology. SBCL is clearly trying to check types at compile time.
The authors of SBCL would not bother going to such great lengths to
introduce extensive checking of types at compile time if it wasn't
worthwhile. So we should all be able to agree that checking types at
compile time is a good thing.

The real debate is how much are you willing to sacrifice in the name of
static checking? For example, how often do you use polymorphic recursion in
your Lisp code?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <871weevjwn.fsf@geddis.org>
Jon Harrop <···@ffconsultancy.com> wrote on Tue, 07 Aug 2007:
> This whole thread is based upon ill-defined notions of several key terms.
> Static typing is the first one. Pascal seems to be using a definition
> of "static typing" that requires the checker to prevent uncheckable
> programs from being run. Nothing fundamentally requires that, of course, a
> static type checker could just emit warnings and continue regardless.

Well, of course.  In fact, I was hoping some of the static typing fans in this
thread might eventually come to this realization.

The real issue in the debate is not compile-time checks per se; it is about
the utility of typical programming languages that are called "statically
typed", vs. those languages called "dynamically typed".  Do the former have
some insight that the latter are missing?  The static typing fans sure think
so.

Those languages said to be in the set of "statically typed" do indeed reject
programs that their checker cannot label as "well typed".  The debate actually
is over this rejection of programs, not so much over the very idea of doing
checks of various kinds at compile time.

> Lisp implementations do this, such as the static type checker in SBCL:

Absolutely.

> He then goes to on to say that static typing means that uncheckable programs
> will not be run, which is his implicit definition of "static typing" rather
> than an argument.

Pascal's not the only one.  Everybody on the thread so far, up to your post,
has agreed on this aspect of the debate.  Even though, as you say, it isn't
really true.

> As you say, Markus, there is also the question of what "succeed"
> and "reject" mean. Was the above program rejected by SBCL?

No, of course it wasn't rejected.  SBCL generated compiled object code,
and you can run that code and things happen.  Not random things; exactly the
things demanded by the semantics of Lisp.

> OCaml emits a warning because it knows the following function definition is
> wrong:
> Did that "succeed" or was it "rejected"?

I don't know OCaml well enough to say for sure, but so far every programming
language that touts itself as "statically typed" refuses to allow execution
of a program if its type checker can't label every expression successfully.

> To be frank, nobody should care. This is just a religious debate that falls
> down to terminology. SBCL is clearly trying to check types at compile time.
> The authors of SBCL would not bother going to such great lengths to
> introduce extensive checking of types at compile time if it wasn't
> worthwhile. So we should all be able to agree that checking types at
> compile time is a good thing.

Yes, yes, all that's true.

What people care about is what happens if the static type checker fails to be
happy with a program.  (And more particularly: can the language itself offer
features which cannot possibly be successfully type checked at compile time.)
Will the language/compiler be willing to run it anyway?

This is a debate with real content.

> The real debate is how much are you willing to sacrifice in the name of
> static checking?

I'm annoyed to be forced to agree with you.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
If trees could scream, would we be so cavalier about cutting them down?  We
might, if they screamed all the time, for no good reason.
	-- Deep Thoughts, by Jack Handey [1999]
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9pt9c$nbi$1@news.xmission.com>
Don Geddis wrote:

[...]

> Those languages said to be in the set of "statically typed" do indeed reject
> programs that their checker cannot label as "well typed". [...]

As explained elsewhere, any program that can be expressed in a 
dynamically-typed language can be expressed in a "statically-typed" 
language (all else being equal) if that language allows for 
discriminated unions plus exceptions. The exceptions are thrown when 
there is the equivalent of a runtime type error. (Actually, exceptions 
are more powerful than runtime type errors, but never mind that for now.)

The real burden the type system places on the programmer is that the 
programmer must explicitly acknowledge in their code the places where 
"runtime type errors" are allowed to happen. That's it. The idea that a 
type system prevents a whole class of programs from running is uninformed.

-thant
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87r6m7m455.fsf@geddis.org>
Thant Tessman <···@standarddeviance.com> wrote on Mon, 13 Aug 2007:
> Don Geddis wrote:
>> Those languages said to be in the set of "statically typed" do indeed reject
>> programs that their checker cannot label as "well typed".
>
> As explained elsewhere, any program that can be expressed in a
> dynamically-typed language can be expressed in a "statically-typed"
> language (all else being equal) if that language allows for discriminated
> unions plus exceptions. The exceptions are thrown when there is the
> equivalent of a runtime type error. (Actually, exceptions are more powerful
> than runtime type errors, but never mind that for now.)  The real burden
> the type system places on the programmer is that the programmer must
> explicitly acknowledge in their code the places where "runtime type errors"
> are allowed to happen. That's it.

This is bordering on a Turing argument, which is uninteresting.

In the way that programmers actually use statically typed languages, some
code (which could run just fine in a dynamic version of the same language)
might not be typeable by the checker.  The fact that one could rewrite this
code -- in an awkward style used by basically no one -- to get it to compile
is not particularly relevant.  Yes of course that is POSSIBLE, but it isn't
a solution to the problem recommended by pretty much anybody.

Certainly, languages like Haskell and SML don't provide much support for
programming in such a style.  If you choose to use them in that way, you're
basically fighting the language, instead of having it help you program.

> The idea that a type system prevents a whole class of programs from running
> is uninformed.

And that's not quite true either.  Even on top of the problems with (some)
programs expressible in the language, statically typed languages simply avoid
providing features (like EVAL, or dynamic class or function redefinitions)
which break the type checker.

In that sense, the design of the programming language started first from the
belief that the type checker was the #1 priority; any language feature which
made type checking problematic was removed from the (developing) language.
This does NOT mean such features are useless to programmers.  Merely that they
don't play well with this other priority.

So yes, there is a "whole class of programs" which the type system has
(indirectly) prevented the programmer from running.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
Philip knew that as king he could not be tried for murder.  Plus, there was no
one he really wanted to murder.  So, either way, things were starting to look
pretty good.  -- Deep Thoughts, by Jack Handey [1999]
From: Thant Tessman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f9r63m$rrc$1@news.xmission.com>
Don Geddis wrote:
> Thant Tessman <···@standarddeviance.com> wrote on Mon, 13 Aug 2007:

[...]

>> As explained elsewhere, any program that can be expressed in a
>> dynamically-typed language can be expressed in a "statically-typed"
>> language (all else being equal) if that language allows for discriminated
>> unions plus exceptions. [...]
> 
> This is bordering on a Turing argument, which is uninteresting.

No it isn't. That's the point. At best it's syntactically inconvenient, 
which is a long way from Turing equivalent.


> [...] Even on top of the problems with (some)
> programs expressible in the language, statically typed languages simply avoid
> providing features (like EVAL, or dynamic class or function redefinitions)
> which break the type checker. [...]

Two questions: 1) If you took eval and top-level rebinding out of Scheme 
or Lisp, would you still call it dynamically typed? 2) How convincing of 
an argument can you make that eval and top-level rebinding is genuinely 
incompatible with a type system? (Yes, languages with type systems as a 
rule tend not to put too much energy into supporting these things, but 
might that be an orthogonal issue?)

-thant
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c20rpk6ofkqeb@corp.supernews.com>
Don Geddis wrote:
>> The idea that a type system prevents a whole class of programs from
>> running is uninformed.
> 
> And that's not quite true either.  Even on top of the problems with (some)
> programs expressible in the language, statically typed languages simply
> avoid providing features (like EVAL, or dynamic class or function
> redefinitions) which break the type checker.

What about MetaOCaml and F#?

> So yes, there is a "whole class of programs" which the type system has
> (indirectly) prevented the programmer from running.

Can you give an example? The only thing I can think of is polymorphic
recursion, which I've never actually needed. Have you?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87sl6mj6rm.fsf@geddis.org>
Jon Harrop <···@ffconsultancy.com> wrote on Tue, 14 Aug 2007:
> Don Geddis wrote:
>> So yes, there is a "whole class of programs" which the type system has
>> (indirectly) prevented the programmer from running.
>
> Can you give an example? The only thing I can think of is polymorphic
> recursion, which I've never actually needed. Have you?

I was thinking of Pascal's example with EVAL.  Perhaps I can write a similar
one in some kind of bastard pseudocode:

        define f(x) = x + 1;
        define g() = { define f(x) = concatenate(x," new!"); };
        print f(5);
        g();
        print f("This is ");

I would expect such code, when evaluated, to print

        6
        This is new!

I would also expect code like this to not be "well typed" in any reasonable
static type checker.  I obviously don't know every statically typed language,
but certainly the standard ones like ML or Haskell couldn't compile source
code analogous to what I've written above.

(It's generally because they don't offer language constructs which allow
runtime function redefinition.  But that's not an accidental omission;
there's no obvious way to add such capability to those languages, while
preserving the static typing properties that their communities value.)

It's a separate discussion whether code like I've written is "useful" or not.
But some people on this thread have claimed that code which is not "well
typed" (necessarily) doesn't even have semantics, and that claim is clearly
silly.  It's easy to give semantics to the kind of code I wrote above, and
many dynamically typed languages -- such as Lisp -- do so.

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
Children are natural mimics who act like their parents despite every effort to
teach them good manners.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <dqhcn1c3ld.fsf@hod.lan.m-e-leypold.de>
> It's a separate discussion whether code like I've written is "useful" or not.
> But some people on this thread have claimed that code which is not "well
> typed" (necessarily) doesn't even have semantics, and that claim is clearly

You still don't understand, that the original request (by P Constanza,
if I'm right) was for a change in the way that _statically typed_
languages work, because 'even badly typed code has meaning'. The claim
that badly typed code (or at least not statically typable code) has
meaning has been made for __statically typed languages__. Which is
utter nonsense if one knows a bit about the way the semantics in these
languages are defined.

Your attempt to shift the battle field to dynamically typed languages
is futile (and in my view smacks of trolldom). Just try to read back
in which context the original claim has been made and, please, stop
distorting what others said.

> silly.  It's easy to give semantics to the kind of code I wrote above, and
> many dynamically typed languages -- such as Lisp -- do so.

It's not so easy to do so as extension to a statically typed language
AND keep features like type inference.

- M
From: Don Geddis
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <871we4gmpl.fsf@geddis.org>
·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) wrote on Tue, 14 Aug 2007:
I wrote:
>> It's a separate discussion whether code like I've written is "useful" or
>> not.  But some people on this thread have claimed that code which is not
>> "well typed" (necessarily) doesn't even have semantics, and that claim is
>> clearly
>
> You still don't understand, that the original request (by P Constanza, if
> I'm right)

On the contrary, I've followed this entire thread, and it is you that had a
limited understanding of the discussion.

> was for a change in the way that _statically typed_ languages work, because
> 'even badly typed code has meaning'. The claim that badly typed code (or at
> least not statically typable code) has meaning has been made for
> __statically typed languages__. Which is utter nonsense if one knows a bit
> about the way the semantics in these languages are defined.

Any existing language already has its semantics defined.  It obviously doesn't
make sense to talk about source code that, within a given language, is already
well-specified to not have semantics.  Why would you think that _anyone_ would
ever attempt to discuss such an issue?

No, of course the real question was, how _could_ some new language (either a
brand new one, or perhaps a modification of an existing statically or
dynamically typed language) be defined?  What, in the abstract, are the
benefits of static vs. dynamic typing?

Pascal's claim, which I support, is that the way semantics are defined -- by
choice! -- in typical statically typed languages (leaving aside, for the
moment, some of Jon Harrop's comments on some more advanced "static"
languages), eliminates some useful language features that are valued by
programmers who prefer dynamically typed languages.  This is in contrast to
an earlier claim that static typing is clearly the right way to go for future
programming, and that in the future "of course" all respected programming
languages will use static typing, and dynamically typed languages will only
be good for "toy scripts".

The response to this original prediction was to show that there are code
fragments -- not necessarily from any language currently in existence
(although most of them run today in Lisp), but rather from some
hypothetical future language -- that may be useful to the programmers, but
which are not amenable to being labelled "well typed" by a static type
checker.

The response to _that_ example was: "badly typed code has no semantics".
Which is at best highly misleading (if what was meant was simply that some
existing statically typed languages _define_ such code fragments to have no
meaning), and at worst simply false (if something useful was meant, such as
that no possible language could ever assign semantics to such kinds of code).

> Your attempt to shift the battle field to dynamically typed languages is
> futile (and in my view smacks of trolldom). Just try to read back in which
> context the orginal claim has been made and, please, stop distorting what
> others said.

Was the topic not about a contrast between the possibilities of static typing
vs. dynamic typing, and their effects on language design?  How could one even
have such a discussion if you are unwilling to think about dynamic typing?
How would you ever resolve such a topic by only considering existing languages
already defined with static type checkers?  Without even considering possible
future statically typed languages, much less existing dynamic languages.

I really don't see what you think this discussion is about.

>> silly.  It's easy to give semantics to the kind of code I wrote above, and
>> many dynamically typed languages -- such as Lisp -- do so.
>
> It's not so easy to do so as extension to a statically typed language
> AND keep features like type inference.

Well ... yes.  Finally some insight arrives.

This whole thread might have been short circuited if the static typing fans
had merely said: "static/dynamic typing is a tradeoff, with the following pros
and cons, and we are part of the programming community that prefers the static
side of the tradeoff."

Instead of: "static typing is the future; all modern languages use it;
languages that use dynamic typing are relics of the past, soon to be extinct
or only used for toy scripts.  But no educated, serious, mature programmer
could possibly make the tradeoff that the benefits of dynamic typing are worth
the cost of losing the static type checker.  Only ignorant, uneducated, naive
programmers believe such primitive nonsense."

You've taken the first step towards a larger world, where you might consider
some hypothetical future programming language that offers the benefits of
BOTH static and dynamic typing.  There is no reason, in principle, why the
two styles could not co-exist (in some future language).

But first, you'd have to understand the benefits that some programmers find
in dynamically typed languages...

        -- Don
_______________________________________________________________________________
Don Geddis                  http://don.geddis.org/               ···@geddis.org
/earth:  file system full.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c7o76mentco87@corp.supernews.com>
Don Geddis wrote:
> You've taken the first step towards a larger world, where you might
> consider some hypothetical future programming language that offers the
> benefits of BOTH static and dynamic typing.  There is no reason, in
> principle, why the two styles could not co-exist (in some future
> language).  

I think the two styles have co-existed for some time now, to the extent that
it is no longer possible to call a language dynamic or static. OCaml has an
interactive top-loop, dynamic code loading, inferred open sum types, macros
and many other dynamic features. Stalin-compiled scheme is not interactive,
does not support any dynamic features and is basically completely static.

I think we can all agree that doing type checking at compile time is a good
thing. SBCL does it very well considering how dynamic its target language
is.

I think we can all agree that it should be possible to construct and
manipulate data structures with no type-related overhead. That is free in a
dynamic language and s-exprs let you create ad-hoc data structures as
easily as possible:

  '(+ (* a x x) (* b x) c)

The nearest OOP equivalent is absurd in most mainstream languages (C++,
Java, C#):

#include <string>
#include <list>

using namespace std;

class Expr {};
class Sym : public Expr, string {
public:
  Sym(const char *s) : string(s) {};
};
class Seq : public Expr, list<Expr *> {
public:
  Seq() {};
  Seq(Expr *a) {
    push_back(a);
  };
  Seq(Expr *a, Expr *b) {
    push_back(a);
    push_back(b);
  };
  Seq(Expr *a, Expr *b, Expr *c) {
    push_back(a);
    push_back(b);
    push_back(c);
  };
  Seq(Expr *a, Expr *b, Expr *c, Expr *d) {
    push_back(a);
    push_back(b);
    push_back(c);
    push_back(d);
  };
};

int main() {
  Expr *add = new Sym("+");
  Expr *mul = new Sym("*");
  Expr *a = new Sym("a");
  Expr *b = new Sym("b");
  Expr *c = new Sym("c");
  Expr *x = new Sym("x");
  Expr *poly = new Seq(add, new Seq(mul, a, x, x), new Seq(mul, b, x), c);
}

In SML, Haskell and F# you must explicitly declare your sum types:

  datatype expr =
    | Sym of string
    | Seq of expr list

  val a = Sym "a"
  val b = Sym "b"
  val c = Sym "c"
  val x = Sym "x"
  Seq[Sym "+", Seq[Sym "*", a, x, x], Seq[Sym "*", b, x], c]

That is more concise but it is arguably harder to evolve the data structure
in these languages because you have an explicit type declaration to keep up
to date.

In OCaml, polymorphic variants give the best of both worlds:

  `Add[`Mul[`a; `x; `x]; `Mul[`b; `x]; `c]

The type is inferred so there is zero overhead and no declaration to
maintain. The type is open so later code can extend it. The identifiers can
be overloaded. Better yet, the types are still statically checked.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <fffy2kf132.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:

> ·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) wrote on Tue, 14 Aug 2007:
> I wrote:
>>> It's a separate discussion whether code like I've written is "useful" or
>>> not.  But some people on this thread have claimed that code which is not
>>> "well typed" (necessarily) doesn't even have semantics, and that claim is
>>> clearly
>>
>> You still don't understand, that the original request (by P Constanza, if
>> I'm right)
>
> On the contrary, I've followed this entire thread, and it is you that had a
> limited understanding of the discussion.

Have I, indeed?

>
>> was for a change in the way that _statically typed_ langauges work, because
>> 'even badly typed code has meaning'. The claim that badly typed code (or at
>> least not statically typable code) has meaning has been made for
>> __statically typed languages__. Which is utter nonsense if one knows a bit
>> about the way the semantics in these languages are defined.
>
> Any existing language already has its semantics defined.  It obviously doesn't
> make sense to talk about source code that, within a given language, is already
> well-specified to not have semantics.  Why would you think that _anyone_ would
> ever attempt to discuss such an issue?

Reread the dialog between M Blume and Pascal Constanza: Pascal is
asking why statically typed languages can't just run programs anyway
if they fail the type checker. To which M Blume replies that that
doesn't make sense. Against which Pascal insists that those programs
have semantics anyway. Program texts that fail the type checking, mind
you, in a statically typed language.


> No, of course the real question was, how _could_ some new language (either a
> brand new one, or perhaps a modification of an existing statically or
> dyanamically typed language) be defined?  

Congratulations. Now finally you succeed to ask the right
question. Which neither you nor Pascal did.

> What, in the abstract, are the benefits of static vs. dynamic
> typing?

The benefits of static typing are primarily that you have to discuss
with a certain class of people what static typing is good for and read
their not-to-the-point "examples".

Minor benefits involve catching a certain class of errors and
documenting data flow, pre conditions and postconditions (not
exhaustively).


> Pascal's claim, which I support, is that the way semantics are defined -- by
> choice! -- in typical statically typed languages (leaving aside, for the
> moment, some of Jon Harrop's comments on some more advanced "static"
> languages), eliminates some useful language features that are valued by
> programmers who prefer dyanamically typed languages.  

It might be that those features are valued by programmers that prefer
dynamically typed languages. No doubt: There must be a reason that at
least some of them abhor static types so much (apart from the
misleading idea that "dynamic" sounds, well, cool and flexible,
whereas "static" sounds uncreative and klunky). The more interesting
question is whether the use of those "features" makes a program better
(even if more "powerful") and whether preferring those features or
requiring them makes aforesaid programmers better programmers. For my
part I've already decided. You too, seem to have made your
decision. From that perspective any discussion is useless.

> This is in contrast to an earlier claim that static typing is
> clearly the right way to go for future programming,

A contradiction only exists, if "future programming" actually needs,
e.g. eval. You're missing a logical link in your argument.

> and that in the future "of course" all respected programming
> languages will use static typing, and dynamically typed languages will only
> be good for "toy scripts".

"only be good" != "used mostly". You've already been pointed to what
Ingo really said. Why do you insist to distort it? Because without it your
case breaks down or at least loses most of its urgency?


> The response to this original prediction was to show that there are code
> fragments -- not necessarily from any language currently in existence
> (although most of them run today in Lisp), but rather from some a
> hypothetical future language -- that may be useful to the programmers, but
> which are not amenable to being labelled "well typed" by a static type
> checker.

Yes, in some sense, there are constructs that won't be passed by
statical typing as we know it (Hindley-Milner e.g). (1) I'm not sure
whether it is possible to prove that any static typing algorithm won't
be able to type the fragments in question. I doubt it, because it might
give the construct a type like "universal type" (meaning the big union
type I've been talking about some days ago) and that should do the
trick mostly. The border between static typing and (emulated, perhaps
partial) dynamic typing is not so clear cut. (2) In the "usefulness"
part of your argument I see a certain weakness. As I said: I grant
that in some sense such constructs "exists" (meaning: I could give a
meaning to this "exist" by carefully identifying "same" semantics in
(hypothetical) differently typed languages with the same syntax. But
it still has to be shown that they are useful.

> The response to _that_ example was: "badly typed code has no semantics".

"badly typed code in statically typed languages".

That response was much earlier. Since you changed the topic (and never
before said your piece about "rather from some a hypothetical future
language", this reply was quite right. If we talk about static
typing, badly typed code has no semantics in any statically typed
language I know of (and specifically not in the ML-dialects from which
the dialog Pascal-Matthias started).

> Which is at best highly misleading (if what was meant was simply that some
> existing statically typed languages _define_ such code fragments to have no
> meaning), 

Exactly. You never said that you want to design a new language. And if
you had, a number of other questions would have to be asked.

> and at worst simply false (if something useful was meant, such as
> that no possible language could ever assign semantics to such kinds
> of code).

We didn't say so. 


>> Your attempt to shift the battle field to dynamically typed languages is
>> futile (and in my view smacks of trolldom). Just try to read back in which
>> context the orginal claim has been made and, please, stop distorting what
>> others said.

> Was the topic not about a contrast between the possibilities of static typing
> vs. dynamic typing, and their effects on language design?  How could one even

No. The topic was about "why can't I run that code anyway?".

> have such a discussion if you are unwilling to think about dynamic typing?

> How would you ever resolve such a topic by only considering existing
> languages already defined with static type checkers?

Only in the sense that "running a badly typed program" makes no sense
at all. Which was the answer Pascal and you got. Because you
formulated the wrong approach, mind you: Nobody of you two asked "Can I
extend the semantics of an existing statically typed language so that
...?" or described the "hypothetical future language" with mixed
static/dynamic typing or fallback to dynamic typing when static typing
fails.

> Without even considering possible future statically typed languages,
> much less existing dynamic languages.
>
> I really don't see what you think this discussion is about.

Nor do I know. Do you mind to sum up what your hypothesis and/or
question is at the moment? From the beginning please, so that we do
not start with a different interpretation of something somebody has
said earlier in this thread.
>
>>> silly.  It's easy to give semantics to the kind of code I wrote above, and
>>> many dynamically typed languages -- such as Lisp -- do so.
>>
>> It's not so easy to do so as extension to a statically typed language
>> AND keep features like type inference.
>
> Well ... yes.  Finally some insight arrives.

Surprise: Why did you never use the word "extension" before? I did
days ago (wondering that neither you nor Pascal where able to
formulate what you probably want).

> This whole thread might have been short circuited if the static typing fans
> had merely said: "static/dynamic typing is a tradeoff, with the following pros
> and cons, and we are part of the programming community that prefers the static
> side of the tradeoff."

Honey, we did say that from the beginning, but instead of accepting
what we claim we gain from the trade we got told that even our winnings
are bogus since all that could be achieved with tests just as easily
anyway. You might understand that we didn't want to leave it at that,
though I note a certain fatigue in replying.


> Instead of: "static typing is the future; all modern languages use it;

Yeah, yeah. Our fault.

> languages that use dynamic typing are relics of the past, soon to be extinct
> or only used for toy scripts.  But no educated, serious, mature programmer
> could possibly make the tradeoff that the benefits of dynamic typing are worth
> the cost of losing the static type checker.  Only ignorant, uneducated, naive
> programmers believe such primitive nonsense."

Hm. Did Ingo really write this? 

> You've taken the first step towards a larger world, where you might consider
> some hypothetical future programming language that offers the benefits of
> BOTH static and dynamic typing.  There is no reason, in principle, why the
> two styles could not co-exist (in some future language).

One of the reasons that won't come to pass is, that a language with
full dynamic typing would have to integrate an interpreter or a run
time system which would retain the full type information. And that
would mean, we'd forego the opportunity to compile to targets that are
rather different from the language itself. And we'd leave
opportunities for optimization. 

> But first, you'd have to understand the benefits that some programmers find
> in dynamically typed languages...

Eval. Well ... -- sorry, that doesn't convince me.

-- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13c4gjuaolrdd6@corp.supernews.com>
Don Geddis wrote:
> Jon Harrop <···@ffconsultancy.com> wrote on Tue, 14 Aug 2007:
>> Don Geddis wrote:
>>> So yes, there is a "whole class of programs" which the type system has
>>> (indirectly) prevented the programmer from running.
>>
>> Can you give an example? The only thing I can think of is polymorphic
>> recursion, which I've never actually needed. Have you?
> 
> I was thinking of Pascal's example with EVAL.  Perhaps I can write a
> similar one in some kind of bastard pseudocode:
> 
>         define f(x) = x + 1;
>         define g() = { define f(x) = concatenate(x," new!"); };
>         print f(5);
>         g();
>         print f("This is ");
> 
> I would expect such code, when evaluated, to print
> 
>         6
>         This is new!
> 
> I would also expect code like this to not be "well typed" in any
> reasonable
> static type checker.  I obviously don't know every statically typed
> language, but certainly the standard ones like ML or Haskell couldn't
> compile source code analogous to what I've written above.

The nearest equivalent in OCaml is to factor your code over the "f" that it
is using:

# let g () x =  x ^ "new!";;
val g : unit -> string -> string = <fun>
# let k1 f k =
    print_int (f 5);
    k(g());;
val k1 : (int -> int) -> ((string -> string) -> 'a) -> 'a = <fun>
# let k2 f =
    print_string (f "This is ");;
val k2 : (string -> string) -> unit = <fun>
# k1 (fun x -> x + 1) k2;;
6This is new!

So k1 is called with the initial "f" but it calls k2 with the result of
applying "g" to give the second "f". The two "f"s are distinct so it
doesn't matter that they have different types.

> (It's generally because they don't offer language constructs which allow
> runtime function redefinition.  But that's not an accidental omission;
> there's no obvious way to add such capability to those languages, while
> preserving the static typing properties that their communities value.)
> 
> It's a separate discussion whether code like I've written is "useful" or
> not. But some people on this thread have claimed that code which is not
> "well typed" (necessarily) doesn't even have semantics, and that claim is
> clearly
> silly.  It's easy to give semantics to the kind of code I wrote above, and
> many dynamically typed languages -- such as Lisp -- do so.

Yes.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Stephen J. Bevan
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87fy2ljx54.fsf@dnsalias.com>
Don Geddis <···@geddis.org> writes:
> I was thinking of Pascal's example with EVAL.  Perhaps I can write a similar
> one in some kind of bastard pseudocode:
>
>         define f(x) = x + 1;
>         define g() = { define f(x) = concatenate(x," new!"); };
>         print f(5);
>         g();
>         print f("This is ");
>
> I would expect such code, when evaluated, to print
>
>         6
>         This is new!

Using Common Lisp semantics sure, but using Scheme semantics I expect
it to give a runtime error when you try and add 1 to "This is".
That's because internal defines in Scheme don't work the same as
internal defines in Common Lisp.  Nothing to do with static vs dynamic
typing and everything to do with lexical scoping.  Thus, not the best
example to use to make a distinction between static and dynamic typing.
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <xkbqdj4chw.fsf@hod.lan.m-e-leypold.de>
Don Geddis wrote:

> The question is what to do about such code.  Fans of static typing basically
> respond: "if my tool is unable to prove your code type-safe, then you will be
> prohibited from running it."

Not quite right. They say "if my tool is unable to type the code, then
you will be prohibited from running it".

The point is:

  - It's just "well typed" code, not "type safe". Safety is an aspect
    in typing (see Luca Cardellis tutorial paper), but dynamically
    typed languages are also "safe": There are no programs that are
    accepted by the system, but then result in "undefined behaviour"
    as in C.

  - Typing is usually decidable and a straight-forward algorithm with
    a limited run time. So "unable prove your code type-safe" is not
    the issue here: Typing positively turns up type conflicts when it
    rejects programs. It's not like in a proof assistant that it only
    cannot find a proof (because it's missing intermediate information
    and cannot exhaust the search space). "Unable" doesn't come into
    it. The type checker will positively prove that your code is not
    well typed.


Regards -- Markus
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x3ayuxzd7.fsf@ruckus.brouhaha.com>
·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) writes:
>   - It's just "well typed" code, not "type safe". Safety is an aspect
>     in typing (see Luca Cardellis tutorial paper),

Do you mean the 1984 or 1985 paper on typeful programming?  I've seen
many references to it and have been wanting to look for it.  Do you
know if it's online?
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <w9sl6u3yga.fsf@hod.lan.m-e-leypold.de>
Paul Rubin wrote:

> ·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) writes:
>>   - It's just "well typed" code, not "type safe". Safety is an aspect
>>     in typing (see Luca Cardellis tutorial paper),
>
> Do you mean the 1984 or 1985 paper on typeful programming?  I've seen
> many references to it and have been wanting to look for it.  Do you
> know if it's online?

Yes. The paper I'm referring to is online. Please use Google on the authors name.

Regards -- M.
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xk5s59kgj.fsf@ruckus.brouhaha.com>
·····································@ANDTHATm-e-leypold.de (Markus E.L. 2) writes:
> >>     in typing (see Luca Cardellis tutorial paper),
> > Do you mean the 1984 or 1985 paper on typeful programming?  > 
> Yes. The paper I'm referring to is online. Please use Google on the
> authors name.

I found the following, which might not be the right one but which is
very good, addressed to non-specialists so it's easy to read. 

http://research.microsoft.com/Users/luca/Papers/TypeSystems.pdf
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5hnrn1F3l628sU1@mid.individual.net>
Markus E.L. 2 wrote:
> Pascal Costanza wrote:
> 
>> Ingo Menger wrote:
>>> On 2 Aug., 10:50, Pascal Costanza <····@p-cos.net> wrote:
>>>> Ingo Menger wrote:
>>>>> On 2 Aug., 10:12, Pascal Costanza <····@p-cos.net> wrote:
>>>>>> Ingo Menger wrote:
>>>>>>> One can understand type inference as a feature that sorts out programs
>>>>>>> that can't possibly run without errors and leaves those that have
>>>>>>> errors most probably.
>>>>>> No, a static type system that enforces type soundness always has to
>>>>>> reject certain programs that may succeed at runtime.
>>>>> This is a question of semantics.
>>>>> One can define a semantic, where nonsensical actions like passing the
>>>>> empty list to a function that extracts the second element from a
>>>>> tuple, would  result in some more or less meaningful value.
>>>>> An example of a language with such a forgiving semantic is perl.
>>>>> Certain nonsensical constructs produce the value undef and the program
>>>>> can go on with the undef value.
>>>> No, it's more than that.
>>> Yes, it's something like that.
>>> At least it makes no sense to speak of "programs" that "succeed" at
>>> runtime without referring to a semantic.
>> All static type systems have to reject programs that may succeed at
>> runtime. The assumption of defenders of static type systems is that
> 
> Care to give an example?  I'm having a bit of trouble to understand
> what "succeed" might mean regarding programs that have been
> "reject(ed)" on the ground that they weren't well type, because there
> is no semantics defined for the rejected programs. 

...because you're not thinking outside of the box.

> Care to rephrase your assertion in terms that are meaningful? What
> does succeed mean there?

"Can be executed in a meaningful way."

>> An important case is when programming languages provide reflection at
>> runtime, including both introspection and intercession. Especially the
>> latter makes it at least extremely difficult to statically type-check
>> programs written in such languages. There is an important class of
>> programs whose behaviors need to be updated and changed at runtime,
>> and such programs rely on reflection at runtime. I am not aware of any
>> static type system that is able to check such programs before
>> deployment, and I am convinced that they will never be able to do so,
>> so I don't bother.
> 
> I'm a bit mystified here: Java has a static type system. Java has
> reflection. Perhaps I've misunderstood something.

No, Java doesn't have reflection. Java has introspection - that is, it 
allows programs to inspect their internals to a certain degree. 
Reflection consists of introspection and intercession, though, not only 
of introspection. Intercession allows you to change the internals of a 
program from within that program itself. (Java has a very naive and 
crippled way of doing a rather uninteresting subset of intercession in 
the form of dynamic proxy classes.)

>> That's my personal conviction and I don't want to argue about personal
>> convictions. 
> 
> As long as you voice them in public and they are at odd with what
other people perceive as well known technical or scientific facts (the
question of what succeed means, for example) you'll probably have to
> discuss them: Or live with other people rejecting them vocally with
> you staying silent. Certainly (when I've time) I won't let them stand
> like that.

What you seem to perceive as "well known technical or scientific facts" 
are not "well known technical and scientific facts." The fact that 
static type systems have to reject otherwise well-behaved programs is 
actually well known by people doing research on static type systems.



Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <tny7golp9p.fsf@hod.lan.m-e-leypold.de>
Pascal Costanza wrote:

>> "reject(ed)" on the ground that they weren't well type, because there
>> is no semantics defined for the rejected programs.
>
> ...because you're not thinking outside of the box.

Wow. I actually see the advent of formal (or even semi-formal)
language specification as some kind of progress. The "but everyone
should see that it would be _logical_ to do this if that is happening"
approach to language design, which advocates an arbitrary number of ad
hoc special cases in the language, should be eradicated, IMHO.

>> Care to rephrase your assertion in terms that are meaningful? What
>> does succeed mean there?
>
> "Can be executed in a meaningful way."

I've written something like this as a shell script wrapper. It puts an
OCaml program through the compiler and if that fails with a type error
it compiles

 module NoOp = struct end;;

instead. Since this is meaningful (at least out of the box) I suppose
we already have the language/compiler you're craving for.

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bhs46nbjngh15@corp.supernews.com>
Pascal Costanza wrote:
> The assumption of defenders of static type systems is that the 
> kinds of programs that may succeed but are nevertheless rejected are
> marginal and uninteresting. The assumption of defenders of dynamic type
> systems is that they are not.

Polymorphic recursion is the main source of rejected but correct programs in
OCaml. From the mailing list archives, there are ~50 messages mentioning
polymorphic recursion out of 15,000 messages.

> All static type systems have to reject programs that may succeed at
> runtime.

This is another incorrect generalization. You probably mean that Java's
static type system rejects some correct programs.

> ...
> I just want to inform you about the fact that there are people who care
> about the programs that static type systems cannot check and who
> understand what the underlying issues are.

Given the above, I think the conclusion is obvious.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Sacha
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2Wksi.30258$h43.896039@phobos.telenet-ops.be>
Pascal Costanza wrote:
> Ingo Menger wrote:
>> The point is that one day one has to pay for certain kinds of
>> flexibility ...
>> The need for testing is, of course, the higher the more dynamic the
>> language.
> 
> Maybe, maybe not. Largely irrelevant here, though.
> 
> 
> Pascal
> 

Any reasonably sized program will need behavior testing anyways. Type 
errors will be caught in there, that's a non-issue IMO.

I personally prefer using lisp, but it seems you all are pushing the 
"type check" thing for the ML style type systems, while I think their 
best feature isn't about catching errors. These type systems are 
creative tools, they foremost help producing solutions through their 
expressiveness. Catching type errors is a nice side effect, but i don't 
think it's all about that.

Sacha
From: Ingo Menger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186067248.011451.154240@j4g2000prf.googlegroups.com>
On 2 Aug., 15:24, Sacha <····@address.spam> wrote:
> Pascal Costanza wrote:
> > Ingo Menger wrote:
> >> The point is that one day one has to pay for certain kinds of
> >> flexibility ...
> >> The need for testing is, of course, the higher the more dynamic the
> >> language.
>
> > Maybe, maybe not. Largely irrelevant here, though.
>
> > Pascal
>
> Any reasonably sized program will need behavior testing anyways. Type
> errors will be catch in there, that's a non-issue IMO.

No, but it may be a symptom of a design flaw.
And then, the question is, how will the type error be fixed? Isn't
there a temptation to use some quick but dirty workaround (i.e., when
the fst function is passed an empty list, we just return -1 or NIL or
whatever, since we have not the time to look in this 25k LOC beast how
it comes that this call is made), to get it working, to meet a
deadline or such?


>
> These type systems are
> creative tools, they foremost help producing solutions through their
> expressiveness. Catching type errors is a nice side effect, but i don't
> think it's all about that.

I agree.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-BC6C2D.17315202082007@news-europe.giganews.com>
In article <························@j4g2000prf.googlegroups.com>,
 Ingo Menger <···········@consultant.com> wrote:

> On 2 Aug., 15:24, Sacha <····@address.spam> wrote:
> > Pascal Costanza wrote:
> > > Ingo Menger wrote:
> > >> The point is that one day one has to pay for certain kinds of
> > >> flexibility ...
> > >> The need for testing is, of course, the higher the more dynamic the
> > >> language.
> >
> > > Maybe, maybe not. Largely irrelevant here, though.
> >
> > > Pascal
> >
> > Any reasonably sized program will need behavior testing anyways. Type
> > errors will be catch in there, that's a non-issue IMO.
> 
> No, but it may be a symptom of a design flaw.
> And then, the question is, how will the type error be fixed?

By fixing it and running the tests again.

> Isn't
> there a temptation to use some quick but dirty workaround (i.e., when
> the fst function is passed an empty list, we just return -1 or NIL or
> whatever, since we have not the time to look in this 25k LOC beast how
> it comes that this call is made), to get it working, to meet a
> deadline or such?

Puh, that's a nicely constructed situation. Couldn't be worse. ;-)
Deadline, no time, lots of code, an error, incompetent people. Wow.

My home webserver is more code than that. All CL. I usually
don't get type errors at runtime. If the webserver gets an error
(any error), the webserver sends me a mail with the error
description and a detailed backtrace.
The backtrace contains all the stack frames with their data.
I look at the function name, do (ed 'some-function), browse
around, change the code, take that code snippet, log on
to the web server computer, get a REPL into the running web server,
enter the code, done.

> 
> 
> >
> > These type systems are
> > creative tools, they foremost help producing solutions through their
> > expressiveness. Catching type errors is a nice side effect, but i don't
> > think it's all about that.
> 
> I agree.

-- 
http://lispm.dyndns.org
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b1b085$0$1613$ed2619ec@ptn-nntp-reader02.plus.net>
Pascal Costanza wrote:
> in dynamic languages, preference is given
> to flexibility, especially with regard to testing and reflective
> capabilities in a language.

You can still support features like reflection from a static language, of
course. F# does this.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Nicolas Neuss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87k5sfba5j.fsf@ma-patru.mathematik.uni-karlsruhe.de>
Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:

> If a language like haskell or an ml had a mode that allowed type-incorrect
> and incomplete programs to be run for testing (without the distraction of
> having to write stubs first) it would be much more attractive to me and, I
> suspect others.
> 
> Then there's the issue of minimal syntax which allows a macro system that
> uses the same langauge for macros as for functions ...

I have a related question, namely if there is anything in Common Lisp which
prohibits extending a CL implementation with a dynamic type system, such
that compiling code gives warnings if type conflicts _with the current
state of the system_ are detected.

As much as I see, in CL we can already declare types whenever we want, so
that it is mainly an implementation issue how much type inference is done.
(CMUCL/SBCL already do something, but much more could be done, e.g. doing
type inference also for generic functions.)

If a CL implementation is extended in such a way, we might have (almost)
all the benefits without giving up dynamicity and/or syntax.  (One could
even imagine a next step where the compiler also tries to prove the
correctness of code (like ACL2 does), giving a warning, if it does not
succeed.)

Nicolas
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8q99s$n26$2@online.de>
Raffael Cavallaro schrieb:
> If a language like haskell or an ml had a mode that allowed 
> type-incorrect and incomplete programs to be run for testing (without 
> the distraction of having to write stubs first) it would be much more 
> attractive to me and, I suspect others.

I'd find that quite attractive, too.
I prefer a slightly more nitpicking compiler over unburdened exploratory 
programming, but that's probably really just personal preference.

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46af0e90$0$1604$ed2619ec@ptn-nntp-reader02.plus.net>
Raffael Cavallaro wrote:
> There exist domains where the entities and their relationships do not
> yet have a clear one-to-one mapping with abstract data types and
> operations on them. One rather discovers one possible representation of
> the domain entities and their relationships as one builds the program.
> More importantly, it is very helpful to be able to have this model be
> internally *inconsistent* during development as it takes shape. It
> prevents the distraction from discovering/building the model which is
> caused by having to keep it perfectly internally consistent at all
> times just to satisfy a type-checking compiler. There are definite
> benefits of being able to run and test and expand what to a
> type-checking compiler would be considered an incorrect program,
> something that such a type-checking compiler would not let you run at
> all. iirc, joe marshall had some interesting posts on this the last go
> round a couple of years ago.

Can you reference some specific examples of this?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-12B219.07492331072007@news-europe.giganews.com>
In article <··············@my.address.elsewhere>,
 Matthias Blume <····@my.address.elsewhere> wrote:

> André Thieme <······························@justmail.de> writes:
> 
> > Matthias Blume schrieb:
> >> Raffael Cavallaro
> >> <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> >>
> >>> partially known range of inputs -> combination of human interactive
> >>> and algorithmic processing -> unpredictable range of outputs
> >>>
> >>>
> >>> If this is the future of computing, then the focus on static typing is
> >>> a massive effort in solving the wrong problem.
> >>
> >> I, for one, think that research in static typing is exactly the
> >> /right/ effort.  In my experience (and I do have experience with both
> >> paradigms), I find it easier and faster to perform "exploratory"
> >> programming with a static type system to help me along.
> >
> > What about the Erlang approach?
> > It is dynamically typed, but with Dialyzer [1] one can analyze the
> > code. For Lisp Qi can do these kinds of things in some sense, but one
> > could develop a Lisp-Dialyzer as well. It should be able to talk about
> > more or less all type errors. In cases where the program is not sure if
> > the programmer intended changing the type during runtime it could simply
> > ask her, or spit out warnings.
> > If the compilers get such a mode they could also get out more speed
> > from dynamic code.
> 
> I'm sure all these things are fine tools.  However, they don't address
> what I am getting at.  When I write code, I spend most of my time
> thinking about data-structural invariants.

I can't say that I spend much time thinking about invariants
while programming.

Would you care to elaborate a bit about your programming
process? A little example perhaps?


>  What a type system such as
> ML's lets me do is write these invariants down in such a way that the
> compiler can then verify that my code actually adheres to them.  Thus,
> the type system provides me with (a) the necessary notation for
> writing down invariants, and (b) with the guarantee that violations of
> these invariants are discovered early in the development cycle.
> 
> Some here (especially Mr. Joswig) have harped a lot on the
> (indisputable) fact that in any given type system there necessarily
> exist invariants that cannot be expressed.  Moreover, there are
> undeniably a lot of interesting invariants which would be worth
> verifying statically, but which cannot be expressed in most existing
> type systems.
> 
> But just because there are things we cannot do there is no reason to
> give up on the many that we can do quite well.  I won't decide against
> taking the train from Berlin to Hamburg just because the tracks don't
> also extend to New York City or to the moon.  Primality, exhaustive
> rule sets, etc. are mere straw men -- put up to be knocked down.  Many
> much simpler invariants exist in pretty much all programs, and modern
> type systems are good at expressing and enforcing them.  Type
> inference is only part of the picture: the compiler figures out some
> of the more obvious invariants based on the way the code is written,
> and then makes sure that other parts of the program are consistent.
> But the real power comes with the ability to define your own type
> abstractions.  The ML module system and similar facilities in other
> HOT languages are excellent tools to this end.
> 
> Matthias

-- 
http://lispm.dyndns.org
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <8j8x8wlajg.fsf@hod.lan.m-e-leypold.de>
> In article <··············@my.address.elsewhere>,
>  Matthias Blume <····@my.address.elsewhere> wrote:
>
>> André Thieme <······························@justmail.de> writes:
>> 
>> > Matthias Blume schrieb:
>> >> Raffael Cavallaro
>> >> <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
>> >>
>> >>> partially known range of inputs -> combination of human interactive
>> >>> and algorithmic processing -> unpredictable range of outputs
>> >>>
>> >>>
>> >>> If this is the future of computing, then the focus on static typing is
>> >>> a massive effort in solving the wrong problem.
>> >>
>> >> I, for one, think that research in static typing is exactly the
>> >> /right/ effort.  In my experience (and I do have experience with both
>> >> paradigms), I find it easier and faster to perform "exploratory"
>> >> programming with a static type system to help me along.
>> >
>> > What about the Erlang approach?
>> > It is dynamically typed, but with Dialyzer [1] one can analyze the
>> > code. For Lisp Qi can do these kinds of things in some sense, but one
>> > could develop a Lisp-Dialyzer as well. It should be able to talk about
>> > more or less all type errors. In cases where the program is not sure if
>> > the programmer intended changing the type during runtime it could simply
>> > ask her, or spit out warnings.
>> > If the compilers get such a mode they could also get out more speed
>> > from dynamic code.
>> 
>> I'm sure all these things are fine tools.  However, they don't address
>> what I am getting at.  When I write code, I spend most of my time
>> thinking about data-structural invariants.
>
> I can't say that I spend much time thinking about invariants
> while programming.

So why are you asking for a type system in which invariants are (I
assume) checked automatically? (like: primality, certain properties of
the rewriting rule set you mentioned?).

Regards -- Markus
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-0DDC75.02374001082007@news-europe.giganews.com>
In article <··············@hod.lan.m-e-leypold.de>,
 ·····································@ANDTHATm-e-leypold.de (Markus 
 E.L.) wrote:

> > In article <··············@my.address.elsewhere>,
> >  Matthias Blume <····@my.address.elsewhere> wrote:
> >
> >> André Thieme <······························@justmail.de> writes:
> >> 
> >> > Matthias Blume schrieb:
> >> >> Raffael Cavallaro
> >> >> <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> >> >>
> >> >>> partially known range of inputs -> combination of human interactive
> >> >>> and algorithmic processing -> unpredictable range of outputs
> >> >>>
> >> >>>
> >> >>> If this is the future of computing, then the focus on static typing is
> >> >>> a massive effort in solving the wrong problem.
> >> >>
> >> >> I, for one, think that research in static typing is exactly the
> >> >> /right/ effort.  In my experience (and I do have experience with both
> >> >> paradigms), I find it easier and faster to perform "exploratory"
> >> >> programming with a static type system to help me along.
> >> >
> >> > What about the Erlang approach?
> >> > It is dynamically typed, but with Dialyzer [1] one can analyze the
> >> > code. For Lisp Qi can do these kinds of things in some sense, but one
> >> > could develop a Lisp-Dialyzer as well. It should be able to talk about
> >> > more or less all type errors. In cases where the program is not sure if
> >> > the programmer intended changing the type during runtime it could simply
> >> > ask her, or spit out warnings.
> >> > If the compilers get such a mode they could also get out more speed
> >> > from dynamic code.
> >> 
> >> I'm sure all these things are fine tools.  However, they don't address
> >> what I am getting at.  When I write code, I spend most of my time
> >> thinking about data-structural invariants.
> >
> > I can't say that I spend much time thinking about invariants
> > while programming.
> 
> So why are you asking for a type system in which invariants are (I
> assume) checked automatically? (like: primality, certain properties of
> the rewriting rule set you mentioned?).

are these 'data structural invariants'?

I still wait for an explanation of Mr. Blume's 'development process'.
See the question you haven't quoted.

Btw. your quoting is somehow fucked up. You constantly fail to mention
the person you are quoting. I wrote the sentence you are quoting,
yet my name is not mentioned above. Your 'Some cool user agent (SCUG)'
(that's in the header) does not seem to be very cool...

> 
> Regards -- Markus

-- 
http://lispm.dyndns.org
From: Matthias Blume
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m2myxcvtit.fsf@my.address.elsewhere>
Rainer Joswig <······@lisp.de> writes:

> I still wait for an explanation of Mr. Blume's 'development process'.

You will have to google for it.  I have actually explained this in the
past -- several times over (within similar threads).  I'm not going to
do it again.  (Have to cut down on the time I'm wasting on this...)

Matthias
From: Matthias Blume
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m2ejinwzp5.fsf@my.address.elsewhere>
Matthias Blume <····@my.address.elsewhere> writes:

> Rainer Joswig <······@lisp.de> writes:
>
>> I still wait for an explanation of Mr. Blume's 'development process'.
>
> You will have to google for it.  I have actually explained this in the
> past -- several times over (within similar threads).  I'm not going to
> do it again.  (Have to cut down on the time I'm wasting on this...)

Ok, this may be a bit difficult to google for, so here it is:

  http://groups.google.com/group/comp.lang.scheme/msg/52f3bb337a548307

Notice that if I were to write this up again for you, I would make
sure to mention abstract types.  Much of what I say in the above article
about datatypes works in the general case with abstract types as well.

Matthias
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-D3EA7B.08102501082007@news-europe.giganews.com>
In article <··············@my.address.elsewhere>,
 Matthias Blume <····@my.address.elsewhere> wrote:

> Rainer Joswig <······@lisp.de> writes:
> 
> > I still wait for an explanation of Mr. Blume's 'development process'.
> 
> You will have to google for it.  I have actually explained this in the
> past -- several times over (within similar threads).  I'm not going to
> do it again.  (Have to cut down on the time I'm wasting on this...)
> 
> Matthias

Ah. Google replaces discussion and giving references, now.
How sad.

-- 
http://lispm.dyndns.org
From: Matthias Blume
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m2abtbwz1x.fsf@my.address.elsewhere>
Rainer Joswig <······@lisp.de> writes:

> In article <··············@my.address.elsewhere>,
>  Matthias Blume <····@my.address.elsewhere> wrote:
>
>> Rainer Joswig <······@lisp.de> writes:
>> 
>> > I still wait for an explanation of Mr. Blume's 'development process'.
>> 
>> You will have to google for it.  I have actually explained this in the
>> past -- several times over (within similar threads).  I'm not going to
>> do it again.  (Have to cut down on the time I'm wasting on this...)
>> 
>> Matthias
>
> Ah. Google replaces discussion and giving references, now.
> How sad.

I'm not sure why this is sad.  You want me to waste my time typing
this in all over again, even though realistically speaking the chances
that it would do any good are nil, and even though it is already
publicly available?

I have not seen any effort on your side to try and understand the
other position.  You have ignored my repeated questions regarding your
expertise with modern typed languages.  I am drawing my own
conclusions from this.  You may be a great Lisp programmer, but as
long as you don't give something else a /serious/ try that involves
more than reading some web pages, you will probably never really
understand what I am saying.  (Notice that this is not meant as an
insult or a comment on your mental capabilities.  I am merely speaking
from my own experience.  I sounded just like you before I found myself
in a position where I couldn't avoid giving ML a serious try.)

I now really regret that just moments ago I posted a link to the
article in question.

Matthias
From: ······@corporate-world.lisp.de
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185968579.732563.256740@w3g2000hsg.googlegroups.com>
On Aug 1, 8:26 am, Matthias Blume <····@my.address.elsewhere> wrote:
> Rainer Joswig <······@lisp.de> writes:
> > In article <··············@my.address.elsewhere>,
> >  Matthias Blume <····@my.address.elsewhere> wrote:
>
> >> Rainer Joswig <······@lisp.de> writes:
>
> >> > I still wait for an explanation of Mr. Blume's 'development process'.
>
> >> You will have to google for it.  I have actually explained this in the
> >> past -- several times over (within similar threads).  I'm not going to
> >> do it again.  (Have to cut down on the time I'm wasting on this...)
>
> >> Matthias
>
> > Ah. Google replaces discussion and giving references, now.
> > How sad.
>
> I'm not sure why this is sad.  You want me to waste my time typing
> this in all over again, even though realistically speaking the chances
> that it would do any good are nil, and even though it is already
> publicly available?

Come on, giving the reference saves both lots of time.

> I have not seen any effort on your side to try and understand the
> other position.  You have ignored my repeated questions regarding your
> expertise with modern typed languages.  I am drawing my own
> conclusions from this.  You may be a great Lisp programmer, but as
> long as you don't give something else a /serious/ try that involves
> more than reading some web pages, you will probably never really
> understand what I am saying.  (Notice that this is not meant as an
> insult or a comment on your mental capabilities.  I am merely speaking
> from my own experience.  I sounded just like you before I found myself
> in a position where I couldn't avoid giving ML a serious try.)

I find especially Haskell very useful. Ever since I used Miranda,
something
like twenty years ago. I followed then mostly the developments of
Haskell
(not so much the ML-branch of languages).

These languages are just not the successor to Lisp. The word 'modern'
is also mostly political. I tend to avoid it. It is a very
different style of programming, but I was a bit playing 'devil's
advocate',
since I fear in 'real world' type systems of FPLs are a) not
widely used, b) difficult to use for many and c) often oversold
by its proponents (like Mr. Harrop).

> I now really regret that just moments ago I posted a link to the
> article in question.

That link is in the direction I was asking about. I think information
like that is extremely important and should be explained in much
more detail. I thought that you might have that readily available...
I'm still interested if you have more material in that direction.

>
> Matthias
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b1b1a9$0$1621$ed2619ec@ptn-nntp-reader02.plus.net>
······@corporate-world.lisp.de wrote:
> since I fear in 'real world' type systems of FPLs are
> a) not widely used,

I'm not sure about this. A large proportion of functional programmers choose
statically-typed languages. Use of at least OCaml is widespread in
industry. There is also considerable interest in F#.

> b) difficult to use for many and

Perhaps.

> c) often oversold by its proponents

Subjective.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <851wen8v2y.fsf@hod.lan.m-e-leypold.de>
Rainer J said:
> In article <··············@my.address.elsewhere>,
>  Matthias Blume <····@my.address.elsewhere> wrote:
>
>> Rainer Joswig <······@lisp.de> writes:
>> 
>> > I still wait for an explanation of Mr. Blume's 'development process'.
>> 
>> You will have to google for it.  I have actually explained this in the
>> past -- several times over (within similar threads).  I'm not going to
>> do it again.  (Have to cut down on the time I'm wasting on this...)
>> 
>> Matthias
>
> Ah. Google replaces discussion and giving references, now.
> How sad.

Not really, Rainer. What you and Raffael had to say on static typing
exposed a sad lack of understanding on an established programming
methodology (I'm not talking about "liking it" but about an abstract
understanding how the proponents of the method suggest to apply it). I
don't think that c.l.f or c.l.l is a tutorial on such matters.

I totally agree with Matthias on this: Those who are lacking the basic
knowledge in question will have to catch up on their own.

Regards -- Markus
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-DEA2F5.13070601082007@news-europe.giganews.com>
In article <··············@hod.lan.m-e-leypold.de>,
 ·····································@ANDTHATm-e-leypold.de (Markus 
 E.L.) wrote:

> Rainer J said:
> > In article <··············@my.address.elsewhere>,
> >  Matthias Blume <····@my.address.elsewhere> wrote:
> >
> >> Rainer Joswig <······@lisp.de> writes:
> >> 
> >> > I still wait for an explanation of Mr. Blume's 'development process'.
> >> 
> >> You will have to google for it.  I have actually explained this in the
> >> past -- several times over (within similar threads).  I'm not going to
> >> do it again.  (Have to cut down on the time I'm wasting on this...)
> >> 
> >> Matthias
> >
> > Ah. Google replaces discussion and giving references, now.
> > How sad.
> 
> Not really, Rainer. What you and Raffaelo had to say on static typing
> exposed a sad lack of understanding on an established programming
> methodology (I'm not talking about "liking it" but about an abstract
> understanding how the proponents of the method suggest to apply it). I
> don't think that c.l.f of c.l.l is a tutorial on such matters.

I know about static typing.

I was asking how Mr. Blume writes code, given that he
says 'structural invariants' are important for him. What
is a typical process from seeing a problem to working code?
Especially since he is using/proposing functional programming
languages with static typing. Since I don't know how
he develops code, I was asking. Somehow hoping that I
get a good answer from somebody who is qualified and
maybe has spent some thought about programming methodology
with static functional programming languages. Lots of
people have different approaches, and I thought maybe
I could learn something. But all I get is a vague reference
to google and someone (a troll?) telling me I have a lack of
understanding.

> I totally agree with Matthias on this: Those who are lacking the basic
> knowledge in question will have to catch up on their own.

Doesn't surprise me.

> 
> Regards -- Markus

-- 
http://lispm.dyndns.org
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <posl734d6v.fsf@hod.lan.m-e-leypold.de>
> In article <··············@hod.lan.m-e-leypold.de>,
>  ·····································@ANDTHATm-e-leypold.de (Markus 
>  E.L.) wrote:
>
>> Rainer J said:
>> > In article <··············@my.address.elsewhere>,
>> >  Matthias Blume <····@my.address.elsewhere> wrote:
>> >
>> >> Rainer Joswig <······@lisp.de> writes:
>> >> 
>> >> > I still wait for an explanation of Mr. Blume's 'development process'.
>> >> 
>> >> You will have to google for it.  I have actually explained this in the
>> >> past -- several times over (within similar threads).  I'm not going to
>> >> do it again.  (Have to cut down on the time I'm wasting on this...)
>> >> 
>> >> Matthias
>> >
>> > Ah. Google replaces discussion and giving references, now.
>> > How sad.
>> 
>> Not really, Rainer. What you and Raffaelo had to say on static typing
>> exposed a sad lack of understanding on an established programming
>> methodology (I'm not talking about "liking it" but about an abstract
>> understanding how the proponents of the method suggest to apply it). I
>> don't think that c.l.f of c.l.l is a tutorial on such matters.
>
> I know about static typing.
>
> I was asking how Mr. Blume writes code, given that he
> says 'structural invariants' are important for him. What
> is a typical process from seeing a problem to working code?
> Especially since he is using/proposing functional programming
> languages with static typing. Since I don't know how
> he develops code, I was asking. Somehow hoping that I
> get a good answer from somebody who is qualified and
> maybe has spent some thought about programming methodology
> with static functional programming languages. 

Well -- I'm a bit at a loss here which answer would really satisfy
you. Probably the most concise answer would be "Mr. Blume writes code
by typing on the keyboard". If on the other side you ask for examples how
invariants and static typing is useful in the development process (and
it is especially in incremental and exploratory development): This is
a broad subject and I'd be happy to continue the discussion on that, if
it weren't that I've to consider you a lost cause: (a) lacking basic
knowledge on the subject matter, (b) unwilling to study up on the
missing topics, (c) calling your opponent a troll. 

Those three factors together make for really bad discussions on usenet
and almost everywhere else.

> Lots of people have different approaches, and I thought maybe I
> could learn something. But all get is a vague reference to google
> and someone (a troll?) telling me I have a lack of understanding.

But you _do_ have a lack of understanding as the example you used
conclusively shows. Your expectation that a type system somehow would
prove at the moment of compilation that a system of rewriting actually
has a minimal normal form or something like this is (a) unrealistic
(because it would mean that there is a general theorem prover in the
typing algorithm), (b) completely ignores how type systems are used in
practice and where they actually pay off. The latter makes me suspect
that you have never actually seen how type systems are used or used
them yourself. Since this is a standard technique and writing a
tutorial on that just for this thread will cost the author at least 2
(if not 10 or 15) hours I don't think that it is reasonable to expect
anyone to write this tutorial.

Somehow I find myself in a position similar to that that my opponent
has never heard of the halting theorem and now says "if you really
think that is important, explain it to me now and tell me where it is
actually used". It would be a waste of time to do so, not because the
esteemed opponent is an idiot but because it would be vastly more
efficient if he consulted the standard literature / sources on this
topic.

As far as "someone (a troll?)" goes, I'll take it for what it is intended
to be: A deliberate insult. I suppose you think this is the right
method to entice people to get the answers you pretend to crave? Given
the thread on "JH is a spammer" etc. I can only assume in c.l.l
"troll" has been redefined as "somebody whose replies I don't like".

So Rainer, you got the last word now. I already said elsewhere that I
think you're lacking manners (see the "Jon Harrop is a spammer
affair") and that I don't understand your attitude in some
respects.

>> I totally agree with Matthias on this: Those who are lacking the basic
>> knowledge in question will have to catch up on their own.
>
> Doesn't surprise me.

Why?

Regards -- Markus
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-911B8F.18140302082007@news-europe.giganews.com>
In article <··············@hod.lan.m-e-leypold.de>,
 ·····································@ANDTHATm-e-leypold.de (Markus 
 E.L.) wrote:

I think this was from me?

> > In article <··············@hod.lan.m-e-leypold.de>,
> >  ·····································@ANDTHATm-e-leypold.de (Markus 
> >  E.L.) wrote:
> >
> >> Rainer J said:
> >> > In article <··············@my.address.elsewhere>,
> >> >  Matthias Blume <····@my.address.elsewhere> wrote:
> >> >
> >> >> Rainer Joswig <······@lisp.de> writes:
> >> >> 
> >> >> > I still wait for an explanation of Mr. Blume's 'development process'.
> >> >> 
> >> >> You will have to google for it.  I have actually explained this in the
> >> >> past -- several times over (within similar threads).  I'm not going to
> >> >> do it again.  (Have to cut down on the time I'm wasting on this...)
> >> >> 
> >> >> Matthias
> >> >
> >> > Ah. Google replaces discussion and giving references, now.
> >> > How sad.
> >> 
> >> Not really, Rainer. What you and Raffaelo had to say on static typing
> >> exposed a sad lack of understanding on an established programming
> >> methodology (I'm not talking about "liking it" but about an abstract
> >> understanding how the proponents of the method suggest to apply it). I
> >> don't think that c.l.f of c.l.l is a tutorial on such matters.
> >
> > I know about static typing.
> >
> > I was asking how Mr. Blume writes code, given that he
> > says 'structural invariants' are important for him. What
> > is a typical process from seeing a problem to working code?
> > Especially since he is using/proposing functional programming
> > languages with static typing. Since I don't know how
> > he develops code, I was asking. Somehow hoping that I
> > get a good answer from somebody who is qualified and
> > maybe has spend some thought about programming methodology
> > with static functional programming languages. 
> 
> Well -- I'm a bit at a loss here which answer would really satisfy
> you. Probably the most concise answer would be "Mr. Blume writes code
> by typing on the keyboard". If on the other side ask for examples how
> invariants and static typing is useful in the development process (and
> it is especially in incremental and exploratory development): This is
> a broad subject and I'd be happy to continue the disussion on that, if
> it weren't that I've to consider you a lost cause: (a) lacking basic
> knowledge on the subject matter, (b) unwilling to study up on the
> missing topics, (c) calling your opponent a troll. 

You always move away from the topic (software development
with SFPLs) and write lots of drivel like this. Why is that?

> Those three factors together make for really bad discussions on usenet
> and almost everywhere else.
> 
> > Lots of people have different approaches, and I thought maybe I
> > could learn something. But all get is a vague reference to google
> > and someone (a troll?) telling me I have a lack of understanding.
> 
> But you _do_ have a lack of understanding as the example you used
> conclusively shows. Your expectation that a type system somehow would
> prove at the moment of compilation that a system of rewriting actually
> has a minimal normal form or something like this is (a) unrealistic
> (because it would mean that there is a general theorem proofer in the
> typing algorithm),

Look, I never expected that. I just point out that there
are a lots of constraints on data and functions that
can't be expressed in a trivial or practical way in a type system.
I have been asked for a few examples and gave that list.
These constraints that are not practically expressed
in most programming languages, but still can be very interesting
and still may need to be checked before the software
runs.

> (b) completely ignores how type systems are used in
> practice and where they actually pay off. The latter makes me suspect
> that you have never actually seen how type systems are used or used
> them yourself. Since this is a standard technique and writing a
> tutorial on that just for this thread will cost the author at least 2
> (if not 10 or 15) hours I don't think that it is reasonable to expect
> anyone to write this tutorial.

I don't know what you expect from the newsgroup here? I'd expect
that asking for some description of programming practice
is not unusual.
I was hoping that there was already some good description
available. Mr. Blume later pointed out some earlier post
which contained some useful information.

There are people who find this topic interesting.
Like Peter Seibel.
His last book was 'Practical Common Lisp'. You might
want to check it out. It is pretty good.
http://www.gigamonkeys.com/book/

He is writing a new book: 'Coders at Work'.
Why not nominate somebody from the SFPL community to be
interviewed for the book? Right now Simon Peyton Jones
is #16 on the list of interesting coders.

http://codersatwork.com/
"This is the web site for a new book I'm working on
for Apress which will contain interviews with around
sixteen of the most interesting computer programmers alive
today. It will be a companion volume to Apress's Founders at
Work by Jessica Livingston, and, like that book, a continuation
of the tradition started by the Paris Review in 1953 when they
published a Q&A interview with novelist E.M. Forster, inaugurating
a series of interviews later titled "Writers at Work". As the words
"at work" suggest, my goal is to focus the interviews on how
subjects tackle the day-to-day work of programming. Which is not
to say we won't touch on other topics such as how they
became great programmers, how they recognize programming talent
in others, and what kinds of problems they find most interesting."

... lots of Markus' drivel deleted ...

Rereading your post, I find not a single piece of useful information in there.

-- 
http://lispm.dyndns.org
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <esd4y5przn.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:
>> Well -- I'm a bit at a loss here which answer would really satisfy
>> you. Probably the most concise answer would be "Mr. Blume writes code
>> by typing on the keyboard". If on the other side ask for examples how
>> invariants and static typing is useful in the development process (and
>> it is especially in incremental and exploratory development): This is
>> a broad subject and I'd be happy to continue the disussion on that, if
>> it weren't that I've to consider you a lost cause: (a) lacking basic
>> knowledge on the subject matter, (b) unwilling to study up on the
>> missing topics, (c) calling your opponent a troll. 
>
> You always move away from the topic (software development
> with SFPLs) and write lot's drivel like this. Why is that?

Because you (a) that was how I came into the thread by defending what
I consider Jon's right of free speech against the Jon-is-a-spammer
movement and (b) because you started the thread (well, the immediate
precursor) by copying a post of Jon from c.l.l to c.l.f after flagging
Jon post there as a troll. Actually an action by you I still don't
understand.

>> (b) completely ignores how type systems are used in
>> practice and where they actually pay off. The latter makes me suspect
>> that you have never actually seen how type systems are used or used
>> them yourself. Since this is a standard technique and writing a
>> tutorial on that just for this thread will cost the author at least 2
>> (if not 10 or 15) hours I don't think that it is reasonable to expect
>> anyone to write this tutorial.
>
> I don't know what you expect from the newsgroup here? I'd expect
> that asking for some description of programming practice
> is not unusual.

No, but the way you ask matters. Yours was more of the kind "So, Mr
Blume, how do you develop then" and not "Hey, people, you're touting
static typing, so how can it be used, where can I find out more about
it and what's about this area where I see a problem". The former style
to ask always implies that your opponent doesn't know what he's doing,
is delusional with regard to his development method (implying he only
thinks he is profiting from static typing) and so on. 

I hope you see the problem.

> I was hoping that there was already some good description
> available. 

The usual text books, I'd reply. I haven't checked them all, but I
suggest looking in books around software development methodology like
Bird and Wadlers Haskel book, but also all books written about
verification and developing software from specification (like the
books of Clif B Jones around VDM-SL and perhaps Gries' "Science of
Programming").

But indeed, my impression was not that you asked for references. My
memory is that you explicitly asked "so how do you [M Blume] write
software?".

> Mr. Blume later pointed out some earlier post which contained some
> useful information.

> There are people who find this topic interesting.

So do I.

> ... lot's of Markus' drivel deleted ...

Yeah, yeah. All drivel.

> Rereading your post, I find not a single useful information in there.

Since now you have an axe to grind with me, I'm not surprised. But you
must know I'm not here to give useful information to you. My last
reply had the sole purpose to point out the error of your ways.

Regards -- Markus

PS: Considering the repeated use of the word "drivel" and the attempt
    to insult me as troll I already regret that I tried to give you
    one or two incomplete references where to look for the information
    you might be missing. I'm sure you can denigrate the information
    given as incomplete (I'm not in the mood to do book reviews for
    you, nor dredging up complete bibliographies) and delete this
    paragraph as drivel.

    Note, that so far I haven't tried to insult you as troll or
    similar. Think about the difference. <censored/>
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1186099929.857414.246580@m37g2000prh.googlegroups.com>
On Aug 2, 2:05 pm, ·····································@ANDTHATm-e-
leypold.de (Markus E.L. 2) wrote:
> Because you (a) that was how I came into the thread by defending what
> I consider Jon's right of free speech against the
> movement

The "Jon-is-a-spammer" movement does not interfere with his "right of
free speech".

> b) because you started the thread (well, the immediate
> precursor) by copying a post of Jon from c.l.l to c.l.f after flagging
> Jon post there as a troll. Actually an action by you I still don't
> understand.

Harrop's posts are arguably on-topic for clf.  They're mostly not for
cll.  As a result, his posts that are trollish in cll are not trollish
in clf.

Suppose that a C++ book author and consultant decided that clf folks
were confused about the worth of functional languages.  Would it be
appropriate for her to continuously post advocacy and argument to clf?

If so, maybe I'll dig up someone who wants to convert the clf folk to
the one true way.  Do you folks like hot monkey sex?
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <tiodhpiha3.fsf@hod.lan.m-e-leypold.de>
Andy Freeman wrote:

> On Aug 2, 2:05 pm, ·····································@ANDTHATm-e-
> leypold.de (Markus E.L. 2) wrote:
>> Because you (a) that was how I came into the thread by defending what
>> I consider Jon's right of free speech against the
>> movement
>
> The "Jon-is-a-spammer" movement does not interfere with his "right of
> free speech".

That's good to know. Some time in between I had a different
impression, specifically after being accused of being a troll myself,
because I didn't find the arguments of the Jon-is-a-spammer movement
particularly convincing. 

Good to know this impression was totally wrong. :-].

>
>> b) because you started the thread (well, the immediate
>> precursor) by copying a post of Jon from c.l.l to c.l.f after flagging
>> Jon post there as a troll. Actually an action by you I still don't
>> understand.
>
> Harrop's posts are arguably on-topic for clf.  They're mostly not for
> cll.  As a result, his posts that are trollish in cll are not trollish
> in clf.

Yes, yes, yes. We heard all that. Only when discussing with me, the
disputants somehow failed to point to the list charta and where Jon's
post are actually OT. Somebody even hinted darkly that Jon is a really
artful spammer who camouflages his spam by filling it with on topic
text. Whatever. Don't construe this as a an invitation to resume this
topic or to pull out the c.l.l. list charta now: It has become stale
and I'm not really interested any more. 


> Suppose that a C++ book author and consultant decided that clf folks
> were confused about the worth of functional languages.  Would it be
> appropriate for her to continuously post advocacy and argument to clf?

Analogies? I just refuse them, because the last time they were about
timberframe and paint jobs and whatnot, and frankly: It just removes
us another level from the topic at hand, because now we'd have to
discuss the correspondence of the analogy to the situation first.

> If so, maybe I'll dig up someone who wants to convert the clf folk to
> the one true way.  

Please do so. :-).

> Do you folks like hot monkey sex?

Is that something people do in C++? Probably something that happens to
you after applying Templates too gratuitously? Terrible ...

Regards -- Markus
From: George Neuner
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <i7e5b39ruvvshtt34ngfbidn3ntvoo30lg@4ax.com>
On Fri, 03 Aug 2007 02:38:28 +0200,
·····································@ANDTHATm-e-leypold.de (Markus
E.L. 2) wrote:

>
>Andy Freeman wrote:
>>
>> Harrop's posts are arguably on-topic for clf.  They're mostly not for
>> cll.  As a result, his posts that are trollish in cll are not trollish
>> in clf.
>
>Yes, yes, yes. We heard all that. Only when discussing with me, the
>disputants somehow failed to point to the list charta and where Jon's
>post are actually OT. 

Nobody in c.l.l cares to keep a litany of Jon's posts - you can always
google for them if you care to.

I read both c.l.f and c.l.l and in c.l.f Jon's posts are usually
relevant and frequently very interesting. I think he has contributed
quite a lot of benefit to c.l.f.


However, in c.l.l he is a nuisance.  Jon may be a genius at Ocaml, but
his knowledge of Lisp has repeatedly been shown to be simplistic, out
of date or just plain wrong.  He ignores the fact that most Lispers
have considerable experience with other languages and dismisses their
reasons for preferring Lisp as foolishness or lack of understanding.
He ignores differences in language culture and programming
methodologies.  His preferred benchmark (the ray tracer) has been
discredited several times for test target and for methodology, but he
continues to cling to it because the results have (so far) favored his
pet language.

When experienced Lispers tire of arguing with him, he turns his
attention to newbies - often criticizing a given Lisp solution and
occasionally posting an irrelevant Ocaml solution to show that it is
"shorter" or "more concise".

Note that providing code in a different language than the forum
generally discusses is not bad as long as the intent is to be helpful
- as in "I don't know your language well but here is how I would solve
the problem in X".  However, too many of Jon's posts to c.l.l take the
tone "solutions in this language are complex/verbose/slow/etc., you
should use this other language".  Such condescending posts are not
generally welcome anywhere.

It is clear to readers of c.l.l that Jon's agenda there is to
discourage people from learning Lisp.  I very much doubt that any
Lispers (even Rainer Joswig) give a damn what Jon Harrop thinks - but
I believe many of them object to mis-information being aimed at new
users.

FWIW: Jon pulls the same crap in other forums as well.  He has been
branded a troll in c.l.c++ and is not welcome (though not yet
considered a troll) in c.l.java.

There are forums for language advocacy and no one will fault Jon for
preaching Ocaml, static typing, type inference or whatever else his
heart desires in those places.  Continually raising topics in forums
in which they are irrelevant makes him unwelcome.

George
--
for email reply remove "/" from address
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13bjt8vaqfn5d15@corp.supernews.com>
George Neuner wrote:
> It is clear to readers of c.l.l that Jon's agenda there is to
> discourage people from learning Lisp.  I very much doubt that any
> Lispers (even Rainer Joswig) give a damn what Jon Harrop thinks - but
> I believe many of them object to mis-information being aimed at new
> users.

Ironically, the misinformation from the Lisp community aimed at newbies is
the main reason I post there.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-572253.23061002082007@news-europe.giganews.com>
In article <··············@hod.lan.m-e-leypold.de>,
 ·····································@ANDTHATm-e-leypold.de (Markus 
 E.L. 2) wrote:

...

Das bringt nichts, Markus.

-- 
http://lispm.dyndns.org
From: Markus E.L. 2
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1t3az1o948.fsf@hod.lan.m-e-leypold.de>
Rainer Joswig wrote:

> In article <··············@hod.lan.m-e-leypold.de>,
>  ·····································@ANDTHATm-e-leypold.de (Markus 
>  E.L. 2) wrote:
>
> ...
>
> Das bringt nichts, Markus.

Nein, tut's nicht, Rainer. Gut, dass Du das endlich einsiehst. Wir
haben nämlich schon auf dem falschen Fuss miteinander angefangen.

Schönen Tag noch -- Markus
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f94lkt$i1o$1@registered.motzarella.org>
Markus E.L. schrieb:

> Not really, Rainer. What you and Raffaelo had to say on static typing
> exposed a sad lack of understanding on an established programming
> methodology (I'm not talking about "liking it" but about an abstract
> understanding how the proponents of the method suggest to apply it).

Funnily it feels for me exactly the same when some people here begin to
speculate about macros (or compare an external preprocessor with macros
that are built in into a dynamic environment).


André
-- 
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <in643z8vcu.fsf@hod.lan.m-e-leypold.de>
Rainer said:

> In article <··············@hod.lan.m-e-leypold.de>,
>  ·····································@ANDTHATm-e-leypold.de (Markus 
>  E.L.) wrote:
>
>> > In article <··············@my.address.elsewhere>,
>> >  Matthias Blume <····@my.address.elsewhere> wrote:
>> >
>> >> Andr� Thieme <······························@justmail.de> writes:
>> >> 
>> >> > Matthias Blume schrieb:
>> >> >> Raffael Cavallaro
>> >> >> <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
>> >> >>
>> >> >>> partially known range of inputs -> combination of human interactive
>> >> >>> and algorithmic processing -> unpredictable range of outputs
>> >> >>>
>> >> >>>
>> >> >>> If this is the future of computing, then the focus on static typing is
>> >> >>> a massive effort in solving the wrong problem.
>> >> >>
>> >> >> I, for one, think that research in static typing is exactly the
>> >> >> /right/ effort.  In my experience (and I do have experience with both
>> >> >> paradigms), I find it easier and faster to perform "exploratory"
>> >> >> programming with a static type system to help me along.
>> >> >
>> >> > What about the Erlang approach?
>> >> > It is dynamically typed, but with Dialyzer [1] one can analyze the
>> >> > code. For Lisp Qi can do these kinds of things in some sense, but one
>> >> > could develop a Lisp-Dialyzer as well. It should be able to talk about
>> >> > more or less all type errors. In cases where the program is not sure if
>> >> > the programmer intended changing the type during runtime it could simply
>> >> > ask her, or spit out warnings.
>> >> > If the compilers get such a mode they could also get out more speed
>> >> > from dynamic code.
>> >> 
>> >> I'm sure all these things are fine tools.  However, they don't address
>> >> what I am getting at.  When I write code, I spend most of my time
>> >> thinking about data-structural invariants.
>> >
>> > I can't say that I spend much time thinking about invariants
>> > while programming.
>> 
>> So why are you asking for a type system in which invariants are (I
>> assume) checked automatically? (like: primality, certain properties of
>> the rewriting rule set you mentioned?).
>
> are these 'data structural invariants'?

Yes. From a software engineering point of view a condition on a data
structure that limits the number of permissible values for that
structure is called an invariant. In static type systems those are
mostly not checked by the compiler, but implied in the type and
checked at conversion time.

Usual application is to limit the number of permissible representations of an abstract type to those that correspond to an abstract value.

E.g: In another subthread the representation of a prime was an
Int. The invariant is the predicate "is a prime", which limits the set
of Ints permissible for values of type prime.

> I still wait for an explanation of Mr. Blume's 'development process'.
> See the question you haven't quoted.

Why should I quote it?

> Btw. your quoting is somehow fucked up. You constantly miss to mention

Yes, that's true. I'll fix it this afternoon, now that someone is
complaining about it.

> the person you are quoting. I wrote the sentence you are quoting,
> yet my name is not mentioned above. 

> Your 'Some cool user agent (SCUG)'
> (that's in the header) does not seem to be very cool...

It's written in Lisp, actually and some time ago I f***ed up the
configuration and haven't come around to looking into it.

Regards -- Markus
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185895776.965927.145710@i13g2000prf.googlegroups.com>
On Jul 30, 8:39 pm, Matthias Blume <····@my.address.elsewhere> wrote:
> ....  When I write code, I spend most of my time
> thinking about data-structural invariants.  What a type system such as
> ML's lets me do is write these invariants down in such a way that the
> compiler can then verify that my code actually adheres to them.  Thus,
> the type system provides me with (a) the necessary notation for
> writing down invariants, and (b) with the guarantee that violations of
> these invariants are discovered early in the development cycle.

I think that that process is fairly similar to how I design and code.

I spend my time worrying about something like types.  (I call the
concept "units" because the "type" has unfortunate technical meaning
and units lets me make a physics analogy.)  If I need to know how many
apples, there are, I need to be able to know that I'm counting all
apples and only apples.  It's mostly a matter of figuring out the
relevant entities and their relationships.

I find that static type systems are good at ensuring that I don't mess
up representations, but that's a small minority of the problems that I
actually have and they introduce other problems that often swamp that
small benefit.

-andy
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8oa2g$n45$1@registered.motzarella.org>
Andy Freeman schrieb:
> On Jul 30, 8:39 pm, Matthias Blume <····@my.address.elsewhere> wrote:
>> ....  When I write code, I spend most of my time
>> thinking about data-structural invariants.  What a type system such as
>> ML's lets me do is write these invariants down in such a way that the
>> compiler can then verify that my code actually adheres to them.  Thus,
>> the type system provides me with (a) the necessary notation for
>> writing down invariants, and (b) with the guarantee that violations of
>> these invariants are discovered early in the development cycle.
> 
> I think that that process is fairly similar to how I design and code.
> 
> I spend my time worrying about something like types.  (I call the
> concept "units" because the "type" has unfortunate technical meaning
> and units lets me make a physics analogy.)  If I need to know how many
> apples, there are, I need to be able to know that I'm counting all
> apples and only apples.  It's mostly a matter of figuring out the
> relevant entities and their relationships.
> 
> I find that static type systems are good at ensuring that I don't mess
> up representations, but that's a small minority of the problems that I
> actually have and they introduce other problems that often swamp that
> small benefit.

We can however see that some big Lisp applications can run for years
without spitting (run-) type errors.
Also don't forget that the dynamically typed Erlang has programs that
must run for years and route millions and millions of telephone calls.
They seem to be quite happy with Erlang, and using the Dialyzer seems
also to help very well.


André
-- 
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x8x8wyz4g.fsf@ruckus.brouhaha.com>
Andr� Thieme <······························@justmail.de> writes:
> We can however see that some big Lisp applications can run for years
> without spitting (run-) type errors.

The same is true for many assembly language applications.  It's partly
a matter of how much debugging you're willing to do.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46afe93c$0$1604$ed2619ec@ptn-nntp-reader02.plus.net>
Andr� Thieme wrote:
> We can however see that some big Lisp applications can run for years
> without spitting (run-) type errors.

I think it is fair to say that some big Lisp applications run for years
without breaking in an unfixable way. However, that doesn't mean they
weren't being run in an interactive environment that stopped when a thread
hit a type error, let the programmer fix it and continue.

> Also don't forget that the dynamically typed Erlang has programs that
> must run for years and route millions and millions of telephone calls.
> They seem to be quite happy with Erlang, and using the Dyalizer seems
> also to help very well.

As I understand it, Dyalizer is a static type checker that you can choose to
run.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-3259D7.04245201082007@news-europe.giganews.com>
In article <························@ptn-nntp-reader02.plus.net>,
 Jon Harrop <···@ffconsultancy.com> wrote:

> Andr� Thieme wrote:
> > We can however see that some big Lisp applications can run for years
> > without spitting (run-) type errors.
> 
> I think it is fair to say that some big Lisp applications run for years
> without breaking in an unfixable way. However, that doesn't mean they
> weren't being run in an interactive environment

False.

> that stopped when a thread
> hit a type error, let the programmer fix it and continue.



> 
> > Also don't forget that the dynamically typed Erlang has programs that
> > must run for years and route millions and millions of telephone calls.
> > They seem to be quite happy with Erlang, and using the Dyalizer seems
> > also to help very well.
> 
> As I understand it, Dyalizer is a static type checker that you can choose to
> run.

-- 
http://lispm.dyndns.org
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <joswig-C023F0.10022730072007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
> > This isn't advanced mathematics people. The trade offs are
> > obvious. People who have used lisp and ocaml (for example) and
> > continue to use lisp obviously value the convenience and dynamism more
> > than the limited correctness proofs and some performance gains.
> 
> Are there a lot of these?

Are there a 'lot' Lisp programmers? A 'lot' SFPL programmers?

> 
> > Those who continue to use ocaml (for example) obviously value the
> > limited correctness proofs and performance gains more.
> 
> I don't know if I'd dignify the benefits of ML-style static typing
> with a fancy term like "limited correctness proof".

But you don't believe in arguments like: 'since I'm using
static types my programs are suddenly correct when
I get the compiler to compile the code without errors'?
These are the arguments I often read from static typing
proponents.

>  I'd say something
> more like: language-verified type consistency gives the program
> something like a rigid skeleton on which to build its functionality.
> Lisp and Python programs have always evoked sort of a flopping,
> "invertebrate" sensation for me.  Alan Perlis's SICP foreword famously
> said:
> 
>     Pascal is for building pyramids -- imposing, breathtaking, static
>     structures built by armies pushing heavy blocks into place. Lisp is
>     for building organisms -- imposing, breathtaking, dynamic structures
>     built by squads fitting fluctuating myriads of simpler organisms into
>     place.
> 
> ML-like languages, I like to imagine, aim for something in the middle:
> graceful spires and leaping archways built on solid foundations
> through precise engineering.
> 
> > The greater traffic of c.l.l. might also suggest something about which
> > things more programmers value.
> 
> clf is mostly a theory discussion group, I think.  Lisp has been
> around a long time and has more users, and languages like CL have a
> lot more "creature comforts" (e.g. debugging environments, language
> features like keyword args) than the ML family.  But really, CL and
> even Scheme are 1970's languages, and things have been happening since
> then.

ML is from when? Haskell is from when? The improvement
of ML over time is also only a small delta of features.
I just read the web page of Successor ML and was not
overwhelmed in terms of value what it means for
programmers.

-- 
http://lispm.dyndns.org
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7xlkcydmsv.fsf@ruckus.brouhaha.com>
Rainer Joswig <······@lisp.de> writes:
> > > People who have used lisp and ocaml (for example) and
> > > continue to use lisp 
> > Are there are a lot of these?
> Are there a 'lot' Lisp programmers? A 'lot' SFPL programmers?

Are there a lot of experienced Ocaml users who went back to Lisp?

> But you don't believe in arguments like: 'since I'm using
> static types my programs are suddenly correct when
> I get the compiler to compile the code without errors'?
> These are the arguments I often read from static typing
> proponents.

I don't think anyone believes that literally but static checking does
catch a lot of errors that would have resulted in runtime exceptions
in Lisp.  And that's just with ML, whose type system is rather
primitive.  I'm still too much of a newbie to understand what really
powerful type systems can do.  I have the (possibly deluded) notion
that in Concoqtion, e.g., you could in principle define a "Prime" type
(for prime numbers) with an appropriate construction for the type.
Then any function returning a Prime that successfully compiles, cannot
possibly return a composite number, i.e. the language implements
full-blown constructive type theory.  There is a proof assistant (Coq,
hence the name Concoqtion) embedded in the language that you use to
build the types and annotate the code until the compiler can be sure
that the function can only return a prime.

> ML is from when? Haskell is from when? The improvement of ML over
> time is also only a small delta of features.  I just read the web
> page of Successor ML and was not overwhelmed in terms of value what
> it means for programmers.

Concoqtion (mentioned above) is an OCaml extension.  Whether it's of
practical value for programmers is maybe uncertain, but it's a long
way from being a small delta of features.  I think of Lisp as being
basically a 1970's language, while Haskell is a 1990's language.  I'm
not a PL expert myself so I don't have a clear idea of what's going on
at the leading edge.  I imagine Concoqtion to be an example, but I
could be wrong.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <joswig-111586.16522730072007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Rainer Joswig <······@lisp.de> writes:
> > > > People who have used lisp and ocaml (for example) and
> > > > continue to use lisp 
> > > Are there are a lot of these?
> > Are there a 'lot' Lisp programmers? A 'lot' SFPL programmers?
> 
> Are there a lot of experienced Ocaml users who went back to Lisp.
> 
> > But you don't believe in arguments like: 'since I'm using
> > static types my programs are suddenly correct when
> > I get the compiler to compile the code without errors'?
> > These are the arguments I often read from static typing
> > proponents.
> 
> I don't think anyone believes that literally but static checking does
> catch a lot of errors that would have resulted in runtime exceptions
> in Lisp.  And that's just with ML, whose type system is rather
> primitive.  I'm still too much of a newbie to understand what really
> powerful type systems can do.  I have the (possibly deluded) notion
> that in Concoqtion, e.g., you could in principle define a "Prime" type
> (for prime numbers) with an appropriate construction for the type.
> Then any function returning a Prime that successfully compiles, cannot
> possibly return a composite number, i.e. the language implements
> full-blown constructive type theory.  There is a proof assistant (Coq,
> hence the name Concoqtion) embedded in the language that you use to
> build the types and annotate the code until the compiler can be sure
> that the function can only return a prime.
> 
> > ML is from when? Haskell is from when? The improvement of ML over
> > time is also only a small delta of features.  I just read the web
> > page of Successor ML and was not overwhelmed in terms of value what
> > it means for programmers.
> 
> Concoqtion (mentioned above) is an OCaml extension.  Whether it's of
> practical value for programmers is maybe uncertain, but it's a long
> way from being a small delta of features.  I think of Lisp as being
> basically a 1970's language, while Haskell is a 1990's language.

Haskell started in 1987/88, also based on earlier languages
like Miranda. The Haskell 1.0 Report is from 1990.
I'd say Haskell is from the 80s.
http://research.microsoft.com/~simonpj/papers/history-of-haskell/history.pdf

Common Lisp was started around 1981, the work on the ANSI Standard was
started in 1986 and it got published in 1994.
Common Lisp is also from the 80s.

>  I'm
> not a PL expert myself so I don't have a clear idea of what's going on
> at the leading edge.  I imagine Concoqtion to be an example, but I
> could be wrong.

-- 
http://lispm.dyndns.org
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7x3az59ava.fsf@ruckus.brouhaha.com>
Rainer Joswig <······@lisp.de> writes:
> Haskell started in 1987/88, also based on earlier languages
> like Miranda. The Haskell 1.0 Report is from 1990.
> I'd say Haskell is from the 80s.
> http://research.microsoft.com/~simonpj/papers/history-of-haskell/history.pdf
> 
> Common Lisp was started around 1981, the work on the ANSI Standard was
> started in 1986 and it got published in 1994.
> Common Lisp is also from the 80s.

Well, Haskell as we know it (with type classes, monads, etc.) is from
later than the 1.0 report.  Those were significant changes to the
language, things that hadn't been done before, and that stuff happened
in the 1990's.  CL was sort of a repackaging of features from earlier
Lisps.  It was intended for practical development and porting of code
from earlier systems, while Haskell was (and is) more of a PL research
testbed.  As such, by intention, CL did not attempt much that was
really new, as opposed to unifying a bunch of divergent threads of
development (Interlisp, Zetalisp, Scheme, etc).  I'll defer to the
Lispers whether CL really felt different than the older large Lisps.
I can accept that putting CL together wouldn't have been possible
before the 80's and therefore it's an 80's language but by that
standard I couldn't push Haskell back to the 80's.  
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <joswig-EE5699.17282930072007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Rainer Joswig <······@lisp.de> writes:
> > Haskell started in 1987/88, also based on earlier languages
> > like Miranda. The Haskell 1.0 Report is from 1990.
> > I'd say Haskell is from the 80s.
> > http://research.microsoft.com/~simonpj/papers/history-of-haskell/history.pdf
> > 
> > Common Lisp was started around 1981, the work on the ANSI Standard was
> > started in 1986 and it got published in 1994.
> > Common Lisp is also from the 80s.
> 
> Well, Haskell as we know it (with type classes, monads, etc.) is from
> later than the 1.0 report.  Those were significant changes to the
> language, things that hadn't been done before, and that stuff happened
> in the 1990's.  CL was sort of a repackaging of features from earlier
> Lisps.  It was intended for practical development and porting of code
> from earlier systems, while Haskell was (and is) more of a PL research
> testbed.  As such, by intention, CL did not attempt much that was
> really new, as opposed to unifying a bunch of divergent threads of
> development (Interlisp, Zetalisp, Scheme, etc). 

That's not true. CL draws a lot from experience that
was gained throughout the 80s. For example, CLOS was newly
developed through the 80s.

> I'll defer to the
> Lispers whether CL really felt different than the older large Lisps.
> I can accept that putting CL together wouldn't have been possible
> before the 80's and therefore it's an 80's language but by that
> standard I couldn't push Haskell back to the 80's.  

http://citeseer.ist.psu.edu/cache/papers/cs/4583/http:zSzzSzcm.bell-labs.comzSzwhozSzwadlerzSztopicszSz..zSzpaperszSzmonadszSzmonads.pdf/wadler92comprehending.pdf

First sentence:  'Category theorists invented monads in the 1960's'.
Haskell is a language of the 60s?

-- 
http://lispm.dyndns.org
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <7xabtddht6.fsf@ruckus.brouhaha.com>
Rainer Joswig <······@lisp.de> writes:
> First sentence:  'Category theorists invented monads in the 1960's'.
> Haskell is a language of the 60s?

Only if Lisp is a 1930's language since that's when Church invented
lambda calculus.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <joswig-CB3E0F.19341630072007@news-europe.giganews.com>
In article <··············@ruckus.brouhaha.com>,
 Paul Rubin <·············@NOSPAM.invalid> wrote:

> Rainer Joswig <······@lisp.de> writes:
> > First sentence:  'Category theorists invented monads in the 1960's'.
> > Haskell is a language of the 60s?
> 
> Only if Lisp is a 1930's language since that's when Church invented
> lambda calculus.

That would apply to Haskell, too. ;-)

The paper was from 90 and cites some stuff from the 80s.

Btw., thanks for the good style of your replies.

-- 
http://lispm.dyndns.org
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8m2v5$uvk$2@online.de>
Rainer Joswig schrieb:
> http://citeseer.ist.psu.edu/cache/papers/cs/4583/http:zSzzSzcm.bell-labs.comzSzwhozSzwadlerzSztopicszSz..zSzpaperszSzmonadszSzmonads.pdf/wadler92comprehending.pdf
> 
> First sentence:  'Category theorists invented monads in the 1960's'.
> Haskell is a language of the 60s?

Now you're being silly. That's just like saying "addition was invented 
by the Greeks, so any programming language that has addition dates back 
to B.C. times."
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5p3az4g1lc.fsf@hod.lan.m-e-leypold.de>
> Rainer Joswig schrieb:
>> http://citeseer.ist.psu.edu/cache/papers/cs/4583/http:zSzzSzcm.bell-labs.comzSzwhozSzwadlerzSztopicszSz..zSzpaperszSzmonadszSzmonads.pdf/wadler92comprehending.pdf
>> First sentence:  'Category theorists invented monads in the 1960's'.
>> Haskell is a language of the 60s?
>
> Now you're being silly. That's just like saying "addition was invented
> by the Greek, so any programming language that has addition dates back
> to B.C. times."

Well, if he wants to give Haskell a pedigree as long as Lisp's, that's
fine with me :-).

Regards -- Markus
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8m2r2$uvk$1@online.de>
Rainer Joswig schrieb:
> Common Lisp was started around 1981, the work on the ANSI Standard was
> started in 1986 and it got published in 1994.
> Common Lisp is also from the 80s.

A lot of Lisp's central design decisions still date back to the 50s.

Regards,
Jo
From: Kent M Pitman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <u4pjlgw8d.fsf@nhplace.com>
[ comp.lang.lisp only
  http://www.nhplace.com/kent/PFAQ/cross-posting.html ]

Joachim Durchholz <··@durchholz.org> writes:

> Rainer Joswig schrieb:
> > Common Lisp was started around 1981, the work on the ANSI Standard was
> > started in 1986 and it got published in 1994.
> > Common Lisp is also from the 80s.
> 
> A lot of Lisp's central design decisions still date back to the 50s.

Yeah, we celebrate that.  Good ideas die hard.

I'm not sure I understand the odd change in direction this discussion
has taken.  Are we talking flash-in-the-pan fashion or durable utility
here?  Why has it suddenly become a liability to stand the test of
time?

Java, for example, has set itself up as a language that is to stand the
test of time.  In a decade or two, if it survives (and I suspect it will),
will we be criticizing it merely for being old?  It doesn't make sense.
Discussion about a language's goodness on the basis of its age seems
somewhat ad hominem to me...
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-4E6628.07384931072007@news-europe.giganews.com>
In article <············@online.de>,
 Joachim Durchholz <··@durchholz.org> wrote:

> Rainer Joswig schrieb:
> > Common Lisp was started around 1981, the work on the ANSI Standard was
> > started in 1986 and it got published in 1994.
> > Common Lisp is also from the 80s.
> 
> A lot of Lisp's central design decisions still date back to the 50s.
> 
> Regards,
> Jo

Yeah, like

* computation with functions
* 'inspiration' from lambda calculus
* list processing
* garbage collection
* data structure for code (code as data)
* interactive use through READ, EVAL and PRINT
* macros

and so on...

others later

* lexical binding, closures
* object system
* meta-object protocol
* exception handling
* various things around compilation
* unicode
* packages
* systems
* FFI

...

-- 
http://lispm.dyndns.org
From: Matthias Blume
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <m2y7gyatw0.fsf@my.address.elsewhere>
Rainer Joswig <······@lisp.de> writes:

> ML is from when? Haskell is from when? The improvement
> of ML over time is also only a small delta of features.
> I just read the web page of Successor ML and was not
> overwhelmed in terms of value what it means for
> programmers.

Do you have any first-hand experience (or any experience at all for
that matter) of "what it means for programmers"?

I'm not sure how seriously I should take anything you say.  Have you
ever used ML (or similar languages) for anything bigger than a toy
project?  Or for any project at all?  Or are you just reading some web
pages and don't get overwhelmed because they don't match your
long-held opinions?

You talk about disadvantages of static typing in a way that is almost
180 degrees opposite of what I have experienced.  For example, the
notion that type inference gets you the worst of both worlds is IMO
completely ridiculous.

So, please, tell me:  Why should I listen to your opinion on static
type systems?
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <u94pjm19id.fsf@hod.lan.m-e-leypold.de>
> In article <··············@ruckus.brouhaha.com>,
>  Paul Rubin <·············@NOSPAM.invalid> wrote:
>
>> Raffael Cavallaro <················@pas-d'espam-s'il-vous-plait-mac.com> writes:
>> > This isn't advanced mathematics people. The trade offs are
>> > obvious. People who have used lisp and ocaml (for example) and
>> > continue to use lisp obviously value the convenience and dynamism more
>> > than the limited correctness proofs and some performance gains.
>> 
>> Are there are a lot of these?
>
> Are there a 'lot' Lisp programmers? A 'lot' SFPL programmers?
>
>> 
>> > Those who continue to use ocaml (for example) obviously value the
>> > limited correctness proofs and performance gains more.
>> 
>> I don't know if I'd dignify the benefits of ML-style static typing
>> with a fancy term like "limited correctness proof".
>
> But you don't believe in arguments like: 'since I'm using
> static types my programs are suddenly correct when
> I get the compiler to compile the code without errors'?
> These are the arguments I often read from static typing
> proponents.

Actually their arguments are a bit more contrived. Now you have me
wondering whether you really didn't understand them or are distorting
them intentionally. Or is it just hyperbole as a figure of speech?

Regards -- Markus
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8k3m4$ia8$1@online.de>
Raffael Cavallaro schrieb:
> On 2007-07-29 14:42:10 -0400, Joachim Durchholz <··@durchholz.org> said:
> 
>> In a language with type inference, you can work almost as freely as in 
>> a language with dynamic typing.
> 
> And if the limited correctness proofs and increased performance are 
> worth this inconvenience - 'almost as freely' isn't the same as 'as 
> freely' in exploratory programming - then you'll choose a modern 
> statically typed language. If the inconvenience is not worth it to you, 
> you'll stick with lisp.

You're presenting this as a black-and-white choice that the issue really 
isn't.
Type inference is a lot like "static typing without the pain", or 
"dynamic typing without the type errors".

> What strikes the correspondents of c.l.l as sleazy is Jon's pretending 
> to be interested in lisp, but really only trying to sell his consulting 
> services in a newsgroup that has greater traffic than that for *all* 
> functional languages combined.

What's Jon's intentions, alleged or real, to do with what *I* write???

> The greater traffic of c.l.l. might also suggest something about which 
> things more programmers value.

If this statement had any value, you'd have to drop Lisp NOW and start 
programming in C++. Or Java. Or even Visual Basic.
I don't have the latest newsgroup statistics handy, but I'm pretty sure 
that Lisp is less mainstream than any of the three above, by orders of 
magnitude.

Regards,
Jo
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-C8DEA4.09552830072007@news-europe.giganews.com>
In article <············@online.de>,
 Joachim Durchholz <··@durchholz.org> wrote:

> Raffael Cavallaro schrieb:
> > On 2007-07-29 14:42:10 -0400, Joachim Durchholz <··@durchholz.org> said:
> > 
> >> In a language with type inference, you can work almost as freely as in 
> >> a language with dynamic typing.
> > 
> > And if the limited correctness proofs and increased performance are 
> > worth this inconvenience - 'almost as freely' isn't the same as 'as 
> > freely' in exploratory programming - then you'll choose a modern 
> > statically typed language. If the inconvenience is not worth it to you, 
> > you'll stick with lisp.
> 
> You're presenting this as a black-and-white choice that the issue really 
> isn't.
> Type inference is a lot like "static typing without the pain", or 
> "dynamic typing without the type errors".

Actually it is worse:

* with type inference you need to write less types
  which means less explicit declarations of type constraints

* still type inference infers at compile time and the
  code ends up inflexible at runtime

> 
> > What strikes the correspondents of c.l.l as sleazy is Jon's pretending 
> > to be interested in lisp, but really only trying to sell his consulting 
> > services in a newsgroup that has greater traffic than that for *all* 
> > functional languages combined.
> 
> What's Jon's intentions, alleged or real, to do with what *I* write???
> 
> > The greater traffic of c.l.l. might also suggest something about which 
> > things more programmers value.
> 
> If this statement had any value, you'd have to drop Lisp NOW and start 
> programming in C++. Or Java. Or even Visual Basic.
> I don't have the latest newsgroup statistics handy, but I'm pretty sure 
> that Lisp is less mainstream than any of the three above, by orders of 
> magnitude.

I thought the discussion was about suddenly Static FPLs are
the successors of Lisp and that a mass migration of
Lisp programmers to c.l.f has happened or will happen anytime soon.
The last migration from Lisp to FPLs was a few decades ago
when FPLs were developed. Many language theoretic academics
jumped ship. Seems that the 'successor' is not that popular
right now, even less than Lisp?

> 
> Regards,
> Jo

-- 
http://lispm.dyndns.org
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8kbmt$rdn$1@online.de>
Rainer Joswig schrieb:
> In article <············@online.de>,
>  Joachim Durchholz <··@durchholz.org> wrote:
> 
>> Raffael Cavallaro schrieb:
>>> On 2007-07-29 14:42:10 -0400, Joachim Durchholz <··@durchholz.org> said:
>>>
>>>> In a language with type inference, you can work almost as freely as in 
>>>> a language with dynamic typing.
>>> And if the limited correctness proofs and increased performance are 
>>> worth this inconvenience - 'almost as freely' isn't the same as 'as 
>>> freely' in exploratory programming - then you'll choose a modern 
>>> statically typed language. If the incovenience is not worth it to you, 
>>> you'll stick with lisp.
>> You're presenting this as a black-and-white choice that the issue really 
>> isn't.
>> Type inference is a lot like "static typing without the pain", or 
>> "dynamic typing without the type errors".
> 
> Actually it is worse:
> 
> * with type inference you need to write less types
>   which means less explicit declarations of type constraints

No, it means no redundant writing-down of type constraints.

You still have the types themselves, mostly in the form of a tagged 
union declaration such as

   datatype AffineCoordinate 'a = AffinePair 'a 'a | AffineInfinity

When constructing new values, you see stuff like

   AffinePair 0.0 0.0

and know it's talking about AffineCoordinates.
When taking parameters, either you see

   f (AffinePair 0.0 0.0)
   ...
   f coord = ...

and know that coord is an AffinePair, or (if the call and the 
declaration are in different modules) you can have

   f :: AffinePair 'a 'a -> some_result_type
   f coord = ...

(but if the inside of f declares a local name 'temp' that's assigned 
from coord, type inference kicks in again, so you don't have to repeat 
the type declaration for 'temp').


I.e. you can have type declarations where they help the human 
programmer, and leave them out where he doesn't need them. The compiler 
can do just fine without such a declaration. (Ordinarily - for some 
recursive types, the compiler does still need an additional type 
declaration to help the inference over the recursion, and that's indeed 
one of the cases of a logically gratuitous type declaration, but this 
kind of stuff is quite rare in practice.)

> * still type inference infers at compile time and the
>   code ends up inflexible at runtime

Now that's something that hasn't been discussed yet.

What kinds of inflexibility do you predict?

>>> The greater traffic of c.l.l. might also suggest something about which 
>>> things more programmers value.
>> If this statement had any value, you'd have to drop Lisp NOW and start 
>> programming in C++. Or Java. Or even Visual Basic.
>> I don't have the latest newsgroup statistics handy, but I'm pretty sure 
>> that Lisp is less mainstream than any of the three above, by orders of 
>> magnitude.
> 
> I thought the discussion was about suddenly Static FPLs are
> the successors of Lisp and that a mass migration of
> Lisp programmers to c.l.f has happened or will happen anytime soon.

No, I was just refuting Raffael's argument that you can gain insight by 
looking at newsgroup bandwidth.

> The last migration from Lisp to FPLs was a few decades ago
> when FPLs were developed. Many language theoretic academics
> jumped ship. Seems that the 'successor' is not that popular
> right now, even less than Lisp?

There were a lot of obstacles to overcome: pure FPLs were too slow, pure 
programs were so small that their advantages didn't pay off visibly, the 
usefulness of Hindley-Milner type inference wasn't explored well enough, 
syntax had to be shaken out, unclear preferences whether strict or 
non-strict approaches were better, and probably a lot others that I 
overlooked.
For non-strict languages, things got quite solid at the time when 
Haskell was created. Before, half a dozen or so research languages had 
existed, and the designs had been converging for a while, leaving only 
superficial differences; researchers then decided to speed up the 
convergence process and create a single useful language that would cater 
all their research needs. The result was a very simple and elegant 
language, far better than what you usually get from a committee; in fact 
it was so good that non-strict programming has been done almost 
exclusively in Haskell since.
They also started to move Haskell towards production quality 
(considerably complexifying the language again). This has been underway 
for roughly a decade now, and the language has already taken roughly 90% 
of that route - it's mostly library problems that remain, the language 
itself is stable enough.

In summary, the FPL offspring of Lisp has been maturing to a point where 
it can start to compete.
Two decades ago, implementations were too slow by orders of magnitude.
One decade ago, implementations where still too slow by a large factor, 
too shaky, and provided too cryptic error messages, and had too little 
libraries for doing anything but writing compilers with them.
Five years ago, implementations were reasonably fast, reasonably solid, 
error messages started to improve, libraries started to grow.
Today, speed should be on par with "normal" Lisps (those that aren't 
optimized for speed specifically), stability should be enough for 
production use, error messages have been improved further, and 90% of 
the libraries that you'd need for any real project are there (though 
every project has a different idea about what the missing 10% are, so 
there's still a lot of work to do).

In other words, Haskell is now where Lisp was ten or twenty years ago. 
It's no surprise that Lisp programmers haven't turned over to Haskell in 
droves: the language is just starting to be a serious alternative for 
day-to-day work.
(There's also the problem that learning a new language and paradigm is a 
lot of effort and time, so I wouldn't expect them to turn over in droves 
no matter what.)

Regards,
Jo
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <2007073011142282327-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-30 03:24:07 -0400, Joachim Durchholz <··@durchholz.org> said:

> If this statement had any value, you'd have to drop Lisp NOW and start 
> programming in C++.

Not when the comparison is among *modern* statically typed languages and lisp.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f8m396$v9i$1@online.de>
Raffael Cavallaro schrieb:
> On 2007-07-30 03:24:07 -0400, Joachim Durchholz <··@durchholz.org> said:
> 
>> If this statement had any value, you'd have to drop Lisp NOW and start 
>> programming in C++.
> 
> Not when the comparison is among *modern* statically typed languages and 
> lisp.

Would you please quote full context, not have me hunting for it.
Oh, and making outrageous statements and qualifying them after the fact, 
with an adjective like "modern" that will place the boundary wherever 
you find it convenient, won't enlighten anybody.

Regards,
Jo
From: Dan Doel
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <847b1$46ae9b2a$d844bc44$26915@FUSE.NET>
Joachim Durchholz wrote:

> Raffael Cavallaro schrieb:
>> On 2007-07-30 03:24:07 -0400, Joachim Durchholz <··@durchholz.org> said:
>> 
>>> If this statement had any value, you'd have to drop Lisp NOW and start
>>> programming in C++.
>> 
>> Not when the comparison is among *modern* statically typed languages and
>> lisp.
> 
> Would you please quote full context, not have me hunting for it.
> Oh, and making outrageous statements and qualifying them after the fact,
> with an adjective like "modern" that will place the boundary wherever
> you find it convenient, won't enlighten anybody.

Even leaving aside the appeal to majority, looking at the traffic on
comp.lang.functional seems a poor way to judge the popularity of statically
typed functional languages. For instance, the haskell-cafe mailing list
alone gets more traffic than comp.lang.functional, in my experience
(excepting the recent Jon Harrop threads, which seem to be fueled mainly by
being cross-posted to comp.lang.lisp). There also seem to be OCaml mailing
lists that get more traffic than you'll find here. Scala looks mailing-list
based as well, and that may be true in general for most statically typed
functional languages these days.

By similar logic, gmane seems to have no mirrors of lists about Scheme or
Lisp, so one can only assume that nobody uses those languages. :)

-- Dan
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2007073102143443658-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-30 22:19:19 -0400, Dan Doel <········@gmail.com> said:

> By similar logic, gmane seems to have no mirrors of lists about Scheme or
> Lisp, so one can only assume that nobody uses those languages. :)

you can't have been looking very hard - I count over 120 different 
lists in the gmane.lisp hierarchy, everything from srfi, to various 
scheme and common lisp implementations and libraries.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-44617D.07243331072007@news-europe.giganews.com>
In article <·····························@FUSE.NET>,
 Dan Doel <········@gmail.com> wrote:

> Joachim Durchholz wrote:
> 
> > Raffael Cavallaro schrieb:
> >> On 2007-07-30 03:24:07 -0400, Joachim Durchholz <··@durchholz.org> said:
> >> 
> >>> If this statement had any value, you'd have to drop Lisp NOW and start
> >>> programming in C++.
> >> 
> >> Not when the comparison is among *modern* statically typed languages and
> >> lisp.
> > 
> > Would you please quote full context, not have me hunting for it.
> > Oh, and making outrageous statements and qualifying them after the fact,
> > with an adjective like "modern" that will place the boundary wherever
> > you find it convenient, won't enlighten anybody.
> 
> Even leaving aside the appeal to majority, looking at the traffic on
> comp.lang.functional seems a poor way to judge the popularity of statically
> typed functional languages. For instance, the haskell-cafe mailing list
> alone gets more traffic than comp.lang.functional, in my experience
> (excepting the recent Jon Harrop threads, which seem to be fueled mainly by
> being cross-posted to comp.lang.lisp). There also seem to be OCaml mailing
> lists that get more traffic than you'll find here. Scala looks mailing-list
> based as well, and that may be true in general for most statically typed
> functional languages these days.
> 
> By similar logic, gmane seems to have no mirrors of lists about Scheme or
> Lisp, so one can only assume that nobody uses those languages. :)
> 
> -- Dan

Lisp on GMANE is right on the front.

http://dir.gmane.org/index.php?prefix=gmane.lisp

-- 
http://lispm.dyndns.org
From: Dan Doel
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <39989$46aecccf$d844bc44$15935@FUSE.NET>
Rainer Joswig wrote:
> Lisp on GMANE is right on the front.
> 
> http://dir.gmane.org/index.php?prefix=gmane.lisp

Ah! Too big to be stuck in the comp.lang section. :)

I remember seeing that a while ago, but forgot about it when I was looking
through all the programming language lists. I stand corrected.
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-D46F7B.07552831072007@news-europe.giganews.com>
In article <·····························@FUSE.NET>,
 Dan Doel <········@gmail.com> wrote:

> Rainer Joswig wrote:
> > Lisp on GMANE is right on the front.
> > 
> > http://dir.gmane.org/index.php?prefix=gmane.lisp
> 
> Ah! Too big to be stuck in the comp.lang section. :)
> 
> I remember seeing that a while ago, but forgot about it when I was looking
> through all the programming language lists. I stand corrected.

I was thinking that the GMANE guy(s) are Lisp fans and that
the Lisp hierarchy was added early.
See also 'Reticule' (http://reticule.gmane.org/), which
is an NNTP server written in CMUCL. Seems like
it is/was the underlying server for GMANE.

;-)

-- 
http://lispm.dyndns.org
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <2007073102090450073-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-30 21:29:34 -0400, Joachim Durchholz <··@durchholz.org> said:

> Would you please quote full context, not have me hunting for it.
> Oh, and making outrageous statements and qualifying them after the 
> fact, with an adjective like "modern" that will place the boundary 
> wherever you find it convenient, won't enlighten anybody.

The full context is the past x years of this discussion on c.l.f. and 
c.l.l. Supporters of static typing in c.l.f. object (rightly imho) to 
c, c++, and java being dragged in as examples of static typing because 
they don't have type inference. They (again, rightly) insist that we 
characterize static typing by the best that is currently available 
(haskell, ml ...). But this being so, one can't now drag c, c++, and 
java from the dust bin and bring them back into the discussion.

The discussion is, and has been for years, "which is better - the best 
of statically typed languages (ocaml, haskell ...) or the best of 
dynamically typed languages (lisp, scheme ...)?"

The discussion is not "which is better some mediocre statically typed 
language (c++, java ...) or some mediocre dynamically typed language 
(python, perl ...). This latter reminds me of the old joke about being 
up to your neck in a pool of vomit when someone throws a bucket of shit 
at your face - do you duck or not?
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ad2c30$0$1610$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
>> However, in the fields I am interested in there are no well defined
>> problems. I need to learn more about the problem while working on it.
>> To do that I want to get rid of the edit->compile->run cycle.
> 
> You can do that in e.g. Haskell.

or OCaml, SML, F#...

> You can't have polymorphic lists directly, but there are straightforward
> ways around that.

Polymorphic recursion is also slightly harder.

>> I want an organic environment that I can change and that
>> can change itself during runtime.
> 
> What for?

To fix the type errors, of course.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8k3cb$hpp$1@online.de>
Jon Harrop schrieb:
> Joachim Durchholz wrote:
>>> However, in the fields I am interested in there are no well defined
>>> problems. I need to learn more about the problem while working on it.
>>> To do that I want to get rid of the edit->compile->run cycle.
>> You can do that in e.g. Haskell.
> 
> or OCaml, SML, F#...

I'm not aware of a good OCaml interpreter.
(I think SML does have it. Dunno about F#.)

>> You can't have polymorphic lists directly, but there are straightforward
>> ways around that.
> 
> Polymorphic recursion is also slightly harder.

I'm talking about monomorphic lists of closures.

>>> I want an organic environment that I can change and that
>>> can change itself during runtime.
 >>
>> What for?
> 
> To fix the type errors, of course.

Jon, that's just a strawman, which would be distracting from the actual 
issues if anybody takes it seriously.
I'm not interested in making Lisp look bad, I'm interested in finding 
out what exactly the problems (and nonproblems and advantages) of Lisp are.

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46adcc9a$0$1594$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> Jon Harrop schrieb:
>> Joachim Durchholz wrote:
>>>> However, in the fields I am interested in there are no well defined
>>>> problems. I need to learn more about the problem while working on it.
>>>> To do that I want to get rid of the edit->compile->run cycle.
>>> You can do that in e.g. Haskell.
>> 
>> or OCaml, SML, F#...
> 
> I'm not aware of a good OCaml interpreter.
> (I think SML does have it. Dunno about F#.)

The top level?

>>>> You can't have polymorphic lists directly, but there are
>>>> straightforward ways around that. 
>>>
>>> You can't have polymorphic lists directly, but there are straightforward
>>> ways around that.
>> 
>> Polymorphic recursion is also slightly harder.
> 
> I'm talking about monomorphic lists of closures.

I thought you were talking about polymorphic lists?

>>>> I want an organic environment that I can change and that
>>>> can change itself during runtime.
>  >>
>>> What for?
>> 
>> To fix the type errors, of course.
> 
> Jon, that's just a strawman, which would be distracting from the actual
> issues if anybody takes it seriously.
> I'm not interested in making Lisp look bad, I'm interested in finding
> out what exactly the problems (and nonproblems and advantages) of Lisp
> are.

If your dynamic style of programming leaves trivial type errors in programs
that you will only discover once your long-running program has completed
you had better be running it in an interactive debugger so you can fix your
program and get your results. That isn't a strawman, it is the bread and
butter of dynamic typing.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ad070f$0$1587$ed2619ec@ptn-nntp-reader02.plus.net>
André Thieme wrote:
> One namespace and implicit currying is trivial and can be done in one
> day. A good pattern matcher will take one week...

You told me you were going to do this yourself in Lisp 7 months ago. Seeing
as you obviously completed the task 6 months and three weeks ago, I was
wondering if you might be kind enough to share your results with us?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46adcd2b$0$1629$ed2619ec@ptn-nntp-reader02.plus.net>
Matthias Blume wrote:
> "you cannot make a list that contains "three" and 3 without defining a
> datatype first"

This last refuge of dynamic typing was ruthlessly nuked by Jacques Garrigue
years ago:

# [`String "three"; `Int 3];;
- : [> `Int of int | `String of string ] list = [`String "three"; `Int 3]

Nothing remains.

> ...
> Have you ever used Ocaml, or SML, or Haskell for a large program? Are
> you speaking from experience?  I myself came from a Lisp background...

I laboriously led this dog to water. It is choosing to die of thirst on
religious grounds whilst clutching aimlessly at a mirage named after a
speech impediment.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ad262a$0$1589$ed2619ec@ptn-nntp-reader02.plus.net>
Matthias Blume wrote:
> "you cannot make a list that contains "three" and 3 without defining a
> datatype first"

This last refuge of dynamic typing was ruthlessly nuked by Jacques Garrigue
years ago:

# [`String "three"; `Int 3];;
- : [> `Int of int | `String of string ] list = [`String "three"; `Int 3]

Nothing remains.

> ...
> Have you ever used Ocaml, or SML, or Haskell for a large program? Are
> you speaking from experience?  I myself came from a Lisp background...

I laboriously led this dog to water. It is choosing to die of thirst on
religious grounds whilst clutching aimlessly at a mirage named after a
speech impediment.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2007073000302764440-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-29 19:22:22 -0400, Jon Harrop <···@ffconsultancy.com> said:

> It is choosing to die of thirst on
> religious grounds whilst clutching aimlessly at a mirage named after a
> speech impediment.

Then go away, spammer.
From: Matthias Blume
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m27ioicuwl.fsf@my.address.elsewhere>
Jon Harrop <···@ffconsultancy.com> writes:

> It is choosing to die of thirst on religious grounds whilst
> clutching aimlessly at a mirage named after a speech impediment.

Jon, can you just shut up?  Don't you understand that with this style
of "discussion" you will never convince anyone?

(Well, netnews discussions have rarely convinced anyone anyway.  But
what you are doing is worse:  You actively alienate people and give
Ocaml -- and, by association, the entire ML family and possibly even
Haskell -- a bad name.)

Matthias
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <200707300148127987-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-30 01:39:22 -0400, Matthias Blume <····@my.address.elsewhere> said:

> But
> what you are doing is worse:  You actively alienate people and give
> Ocaml -- and, by association, the entire ML family and possibly even
> Haskell -- a bad name.

I have to say, though I've had some heated exchanges with Matthias over 
the years, my interaction with him has only made me want to explore ml 
and haskell *more* because his posts tend to be so well reasoned and 
informative that I cannot but believe that there must be something to 
these languages if such an intelligent person sings their praises. Jon 
on the other hand just makes me want to never look at ocaml again.
From: Nicolas Neuss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87k5sofc8r.fsf@ma-patru.mathematik.uni-karlsruhe.de>
Markus E Leypold <·····································@ANDTHATm-e-leypold.de> writes:

> Actually I don't see: First I do not understand "Because it sells
> books" and second, should that really refer to the fact that Jon has
> written a book (and selling it for money), it still got me confused:
> 
>  1. Jon didn't hype his book in this thread.
>  
>  2. That someone is working in a given subject area X and actually is
>     making money from it -- is that disqualifying him from making
>     useful and true statements on usenet? As opposed to all the people
>     around with no history in area X and no success?
> 
> I'm puzzled.
> 
> Regards -- Markus

This Harrop creature is spamming comp.lang.lisp for a long time now.  IIRC
he even admitted that the main purpose of his posts was to make more sales
for his books.

Apparently, he is not behaving that bad in comp.lang.functional.  You can
call yourself lucky.

Yours, Nicolas
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <a73azbtc29.fsf@hod.lan.m-e-leypold.de>
> Markus E Leypold <·····································@ANDTHATm-e-leypold.de> writes:
>
>> Actually I don't see: First I do not understand "Because it sells
>> books" and second, should that really refer to the fact that Jon has
>> written a book (and selling it for money), it still got me confused:
>> 
>>  1. Jon didn't hype his book in this thread.
>>  
>>  2. That someone is working in a given subject area X and actually is
>>     making money from it -- is that disqualifying him from making
>>     useful and true statements on usenet? As opposed to all the people
>>     around with no history in area X and no success?
>> 
>> I'm puzzled.
>> 
>> Regards -- Markus
>
> This Harrop creature is spamming comp.lang.lisp for a long time now.  IIRC
> he even admitted that the main purpose of his posts was to make more sales
> for his books.

That doesn't refute (2). And as far as "spamming" goes: I have
observed in this thread that some people have a curious definition of
spamming. Can't follow you there either.

>
> Apparently, he is not behaving that bad in comp.lang.functional.  You can

And that despite Rainer's attempt to carry the fight over to c.l.f ...

> call yourself lucky.

Or is that an indicator that there you need always 2 to get into a
fight? That the c.l.l crowd is not so blameless either? 

Regards -- Markus
From: Rainer Joswig
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <joswig-6A43F8.09394527072007@news-europe.giganews.com>
In article <··············@hod.lan.m-e-leypold.de>,
 ·····································@ANDTHATm-e-leypold.de (Markus 
 E.L.) wrote:

> > Markus E Leypold <·····································@ANDTHATm-e-leypold.de> writes:
> >
> >> Actually I don't see: First I do not understand "Because it sells
> >> books" and second, should that really refer to the fact that Jon has
> >> written a book (and selling it for money), it still got me confused:
> >> 
> >>  1. Jon didn't hype his book in this thread.
> >>  
> >>  2. That someone is working in a given subject area X and actually is
> >>     making money from it -- is that disqualifying him from making
> >>     useful and true statements on usenet? As opposed to all the people
> >>     around with no history in area X and no success?
> >> 
> >> I'm puzzled.
> >> 
> >> Regards -- Markus
> >
> > This Harrop creature is spamming comp.lang.lisp for a long time now.  IIRC
> > he even admitted that the main purpose of his posts was to make more sales
> > for his books.
> 
> That doesn't refute (2). And as far as "spamming" goes: I have
> observed in this thread that some people have a curious definition of
> spamming. Can't follow you there either.
> 
> >
> > Apparently, he is not behaving that bad in comp.lang.functional.  You can
> 
> And that despite Rainer's attempt to carry the fight over to c.l.f ...

That's a lie, Markus. 

> 
> > call yourself lucky.
> 
> Or is that an indicator that there you need always 2 to get into a
> fight? That the c.l.l crowd is not so blameless either? 
> 
> Regards -- Markus

-- 
http://lispm.dyndns.org
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <8p644685xr.fsf@hod.lan.m-e-leypold.de>
>> And that despite Rainer's attempt to carry the fight over to c.l.f ...
>
> That's a lie, Markus. 

How so? Did you really think after flagging Jon as troll at c.l.l
(which certainly goes deeper and is more personal than saying "that's
OT here") it would be without consequences to post Jon's contribution
here? Or WAS that an attempt to disturb the ant hill and see what
happens?

And I even, in response to the first reaction "don't bring your bar
room brawls to c.l.f" , defended your right to post it there.

I'm really ready to hear that that wasn't your intention and that a
well meant attempt on stimulating discussion failed, but ...

Regards -- Markus
From: Nicolas Neuss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87k5smffbw.fsf@ma-patru.mathematik.uni-karlsruhe.de>
·····································@ANDTHATm-e-leypold.de (Markus E.L.) writes:

> > Markus E Leypold <·····································@ANDTHATm-e-leypold.de> writes:
> >
> >> Actually I don't see: First I do not understand "Because it sells
> >> books" and second, should that really refer to the fact that Jon has
> >> written a book (and selling it for money), it still got me confused:
> >> 
> >>  1. Jon didn't hype his book in this thread.
> >>  
> >>  2. That someone is working in a given subject area X and actually is
> >>     making money from it -- is that disqualifying him from making
> >>     useful and true statements on usenet? As opposed to all the people
> >>     around with no history in area X and no success?
> >> 
> >> I'm puzzled.
> >> 
> >> Regards -- Markus
> >
> > This Harrop creature is spamming comp.lang.lisp for a long time now.  IIRC
> > he even admitted that the main purpose of his posts was to make more sales
> > for his books.
> 
> That doesn't refute (2). And as far as "spamming" goes: I have
> observed in this thread that some people have a curious definition of
> spamming. Can't follow you there either.

As I said elsewhere, the sig is no problem per se, but if it is contained
in hundreds of OCaml/F# promoting posts in a newsgroup where this stuff
does not belong, it is a clear sign for either trolling or spamming (choose
one of those).

> Or is that an indicator that there you need always 2 to get into a
> fight? That the c.l.l crowd is not so blameless either?

Believe what you want.  Do I really have to go through the last months of
comp.lang.lisp to prove my fact?  Maybe you can tell me why his profile

http://groups.google.de/groups/profile?enc_user=I_YUthUAAACWD_8VFKtRU42NeunWF-drfMq7BcOOnMpM9MYZ86CqoA&hl=en

is so bad and shows more posts in comp.lang.lisp than everywhere else...

Nicolas
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <byy7h26r7k.fsf@hod.lan.m-e-leypold.de>
> Believe what you want.  Do I really have to go through the last months of
> comp.lang.lisp to prove my fact?  Maybe you can tell me why his profile
>
> http://groups.google.de/groups/profile?enc_user=I_YUthUAAACWD_8VFKtRU42NeunWF-drfMq7BcOOnMpM9MYZ86CqoA&hl=en
>
> is so bad and shows more posts in comp.lang.lisp than everywhere else...

I see. Google profiles are practically a judgment from God? Or do they
just reflect that people (and people using Google as news reader at
that) don't agree with Jon?

Regards -- Markus
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <8vr6mu3qv0.fsf@hod.lan.m-e-leypold.de>
> Google profiles are not judgement of god but they could give you a
> good hint especially when the number of reviews are quite high for
> example I found only 3 lispers who exceed 100 reviews (I didn't
> searched very hard)


Meaning, there are hardly good Lispers, only bad ones (JH). What does
that say to us about the way Google profiles are used? And the
validity to use them as a measure of someones trollishness?

(BTW: I _am_ sure, if Andrew Tanenbaum would post on Usenet, I'm sure
you could get him voted down to hell in almost no time: He doesn't
"suffer fools gladly" as he says himself and there are a lot of
Linux-true-only fan boys around that would not understand a word he is
saying, but that he is disqualifying Linux.

Regards -- Markus (Who's been using and enjoying Linux for years: But
I'm against irrational beliefs, so ...)

PS: Care to quote my original articles you're answering to? Usenet is
    not a forum and not Google Groups and it's considered good
    netiquette to provide some context.
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185780052.825516.51010@22g2000hsm.googlegroups.com>
On Jul 27, 3:42 pm, ·····································@ANDTHATm-e-
leypold.de (Markus E.L.) wrote:
> > Google profiles are not judgement of god but they could give you a
> > good hint especially when the number of reviews are quite high for
> > example I found only 3 lispers who exceed 100 reviews (I didn't
> > searched very hard)
>
> Meaning, there are hardly good Lispers, only bad ones (JH). What does
> that say to us about the way Google profiles are used? And the
> validity to use them as a measure of someones trollishness?
>
> (BTW: I _am_ sure, if Andrew Tanenbaum would pots on Usenet, I'm sure
> you could get him voted down to hell in almost no time: He doesn't
> "suffer fools gladly" as he says himself and there are a lot of
> Linux-tru-only fan boys around that would not understand a word he is
> saying, but that he is disqualifying Linux.
>
> Regards -- Markus (Who's been using and enjoying Linux for years: But
> I'm against irrational beliefs, so ...)
>
> PS: Care to quote my original articles you're answering to? Usenet is
>     not a forum and not Google Groups and it's considered good
>     netiquette to provide some context.

 There are only 3 accounts that I could found that have more than 100
ratings. Kent Pittman, Pascal Constanza & Pascal Bourguignon, they all
have 4 star rating. If you check for the regular posters you'll find
that most of them have 4 stars, some have 3 stars and quite few 5 star
rating. The only person in this newsgroup who have lower than 3 stars
are gavino (122 ratings) & our resident spammer (1054 ratings) .
They're both recognized as troll & spammer. And it seem to me that
you're one of them .


This reply was only because safari ate most of my post, I won't reply
nor read anything what you say in the future.
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <avr6mq1c2r.fsf@hod.lan.m-e-leypold.de>
> On Jul 27, 3:42 pm, ·····································@ANDTHATm-e-
> leypold.de (Markus E.L.) wrote:
>> > Google profiles are not judgement of god but they could give you a
>> > good hint especially when the number of reviews are quite high for
>> > example I found only 3 lispers who exceed 100 reviews (I didn't
>> > searched very hard)
>>
>> Meaning, there are hardly good Lispers, only bad ones (JH). What does
>> that say to us about the way Google profiles are used? And the
>> validity to use them as a measure of someones trollishness?
>>
>> (BTW: I _am_ sure, if Andrew Tanenbaum would pots on Usenet, I'm sure
>> you could get him voted down to hell in almost no time: He doesn't
>> "suffer fools gladly" as he says himself and there are a lot of
>> Linux-tru-only fan boys around that would not understand a word he is
>> saying, but that he is disqualifying Linux.
>>
>> Regards -- Markus (Who's been using and enjoying Linux for years: But
>> I'm against irrational beliefs, so ...)
>>
>> PS: Care to quote my original articles you're answering to? Usenet is
>>     not a forum and not Google Groups and it's considered good
>>     netiquette to provide some context.
>
>  There are only 3 accounts that I could found that have more than
> 100 ratings. Kent Pittman, Pascal Constanza & Pascal Bourguignon,
> they all have 4 star rating. If you check for the regular posters
> you'll find that most of them have 4 stars, some have 3 stars and
> quite few 5 star rating. The only person in this newsgroup who have
> lower than 3 stars are gavino (122 ratings) & our resident spammer
> (1054 ratings) .  They're both recognized as troll & spammer.  And
> it seem to me that you're one of them .

I'm absolutely amazed by the oracular power some people attribute to
"Google Profiles" or whatever "rating" systems.

> This reply was only because safari eat most of my post, I won't
> reply nor read anything

Good. I think this topic is experiencing a certain stagnation anyway.

> what you say in the future.

Good bye :-) -- Markus
From: Cor Gest
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87vec8rp60.fsf@telesippa.clsnet.nl>
Some entity, AKA Markus E Leypold <·····································@ANDTHATm-e-leypold.de>,
wrote this mindboggling stuff:
(selectively-snipped-or-not-p)

>  2. That someone is working in a given subject area X and actually is
>     making money from it -- is that disqualifying him from making
>     useful and true statements on usenet? As opposed to all the people
>     around with no history in area X and no success?
> 
> I'm puzzled.

Don't be,

It is rather simple, if you want to build a house there is a long
list of 'environmental constraints' to its construction, like geology,
climate, availability and cost of materials, ease of maintenance and
change, to name just a few.
A well-designed house can have a timberframe or reinforced concrete as the
base structure, both give a living space when finished, or any other
basic-structure for that matter.   
But if someone sells rebar, he will loathe a timberframe as a
wood salesman will pity any concrete setup.
Of course there are some drawbacks as to ease of change, which, with both,
can be a horror or a blessing, it just depends how often you want to go
through that ordeal.   

Cor

-- 
	 (defvar MyComputer '((OS . "GNU/Emacs") (IPL . "GNU/Linux"))) 
The biggest problem LISP has, is that it does not appeal to dumb people
    If all fails to satisfy you read the HyperSpec or info woman 
			 mailpolicy @ http://www.clsnet.nl/mail.php
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <vwvec7lebh.fsf@hod.lan.m-e-leypold.de>
> > >>> Humm... I still find your comparison loaded: you rule out the use
> > >>> of libraries for pattern matching in Lisp. Why?
> > >> Because it sells books.
> > >>
> > > I see. . .

> > Actually I don't see: First I do not understand "Because it sells
> > books" and second, should that really refer to the fact that Jon has
> > written a book (and selling it for money), it still got me confused:

> >  2. That someone is working in a given subject area X and actually is
> >     making money from it -- is that disqualifying him from making
> >     useful and true statements on usenet? As opposed to all the people
> >     around with no history in area X and no success?
> > 
> > I'm puzzled.
>
> Don't be,
>
> It is rather simple, if you want to build a house there are is a long
> list of 'environmental contraints' to it'sconstruction, like geology,
> climate, availability and cost of materials, ease of maintenance and
> change, to name just a few.
> A good designed house can have a timberframe or reinforced concrete as the
> base structure, both give an livingspave when finished, or any other
> basic-structure for that matter.   
> But if someone sells rebar, he will loath a timberframe as a
> wood salesman wil pity any concrete setup.
> Of course there are some drawbacks as to ease of change, which, with both,
> can be a horror or a blessing, it just depends how often you want to go
> through that ordeal.   

Ah, I see. There is no need to actually give arguments against Jon's
position: It's automatically discredited by the fact that he makes a
living from his know-how which also implies that he MUST be
wrong. Absolutely.

We should probably handle our relationship to medical doctors the same
way: Not believing their diagnosis since they are -- GASP -- earning
money from their practice.

Or just wait -- now I see, it's all because Jon has no timber
reinforced concrete in his base structure libraries and is trying to
push a rebar book. It's all about architecture. Now there!

Well, well.

Regards -- Markus
From: Cor Gest
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <87ir861uii.fsf@telesippa.clsnet.nl>
Some entity, AKA ·····································@ANDTHATm-e-leypold.de (Markus E.L.),
wrote this mindboggling stuff:
(selectively-snipped-or-not-p)

> Or just wait -- now I see, it's all because Jon has no timber
> reinforced concrete in his base structure libraries and is trying to
> push a rebar book. It's all about architecture. Now there!
> 
> Well, well.

Of course not, it's about the paintjob ...
allmost nobody cares what hides behind that shiny layer.

Cor
-- 
	 (defvar MyComputer '((OS . "GNU/Emacs") (IPL . "GNU/Linux"))) 
The biggest problem LISP has, is that it does not appeal to dumb people
    If all fails to satisfy you read the HyperSpec or info woman 
			 mailpolicy @ http://www.clsnet.nl/mail.php
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185457662.692483.56970@57g2000hsv.googlegroups.com>
On Jul 24, 12:03 pm, Markus E Leypold
<·····································@ANDTHATm-e-leypold.de> wrote:
> > Dan Bensen escreveu:
> >> Cesar Rabak wrote:
> >>> Jon Harrop escreveu:
> >>>> Pattern matching is the single biggest advantage and is the main
> >>>> reason why OCaml, SML, Haskell and F# are all much more concise
> >>>> than Common Lisp.
> >>> Humm... I still find your comparison loaded: you rule out the use
> >>> of libraries for pattern matching in Lisp. Why?
> >> Because it sells books.
>
> > I see. . .
>
> Actually I don't see: First I do not understand "Because it sells
> books" and second, should that really refer to the fact that Jon has
> written a book (and selling it for money), it still got me confused:
>
>  1. Jon didn't hype his book in this thread.
Yes  he do. He even counts clicks coming from usenet :
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet

If you can't recognize spammer who doesn't even try to conceal himself
than ...I'm out of word.

>
>  2. That someone is working in a given subject area X and actually is
>     making money from it -- is that disqualifying him from making
>     useful and true statements on usenet?

It's disqualifying him from making unbiased statements.  How about
asking Oracle salesperson about strengths & weaknesses of various
DBMSs , "Postgre  don't make me lough ...MYSQL it's doesn't even
support ..., SQL Server runs only on ... DB2 it's too ..."

> As opposed to all the people
>    around with no history in area X and no success?

That's already insulting.
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <58hcnrtex6.fsf@hod.lan.m-e-leypold.de>
> On Jul 24, 12:03 pm, Markus E Leypold
> <·····································@ANDTHATm-e-leypold.de> wrote:
>> > Dan Bensen escreveu:
>> >> Cesar Rabak wrote:
>> >>> Jon Harrop escreveu:
>> >>>> Pattern matching is the single biggest advantage and is the main
>> >>>> reason why OCaml, SML, Haskell and F# are all much more concise
>> >>>> than Common Lisp.
>> >>> Humm... I still find your comparison loaded: you rule out the use
>> >>> of libraries for pattern matching in Lisp. Why?
>> >> Because it sells books.
>>
>> > I see. . .
>>
>> Actually I don't see: First I do not understand "Because it sells
>> books" and second, should that really refer to the fact that Jon has
>> written a book (and selling it for money), it still got me confused:
>>
>>  1. Jon didn't hype his book in this thread.
> Yes  he do. He even counts clicks coming from usenet :
> http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet

God, no. He's even counting clicks! How depraved!

> If you can't recognize spammer who doesn't even try to conceal himself
> than ...I'm out of word.

I'm certain you can go and ask his provider to suspend his
access. Providers don't like spammers, I hear.

>>  2. That someone is working in a given subject area X and actually is
>>     making money from it -- is that disqualifying him from making
>>     useful and true statements on usenet?
>
> It's disqualifying him from making unbiased statements.  How about
> asking Oracle salesperson about strengths & weaknesses of various
> DBMSs , "Postgre  don't make me lough ...MYSQL it's doesn't even
> support ..., SQL Server runs only on ... DB2 it's too ..."

I absolutely concur that I don't need and don't want to take a sales
persons statements just on authority alone. But ...

 - JH sells a book and consulting services, not OCaml. He works with
   OCaml, he doesn't make OCaml. Should we now ostracize anybody who
   touts his (or her) tools? Like Linus Torvalds: He's working with
   Linux, he's even proved (with Linux) that he can program, now he
   even profits from that: Don't believe a single statement of what he
   says!? Linux is vaporware!

 - Having a stake in something doesn't prevent one from making true
   statements in that area. Statements don't become automatically
   untrue (this assumption is indeed one of the well know logical
   fallacies). Nobody expects that you take them by authority. You can
   disprove them or demand proof. But logically you can't disqualify
   them on the basis that the speaker might profit if/when they are
   true.

 - I understand that I'm expected to distrust everything Jon said,
   because he has written and is selling a book on OCaml. On the other
   side I'm expected to accept protestations from c.l.l about Lisp and
   what can be done with Lisp without further proof (i.e. a running
   program) even if a number of people there (like, e.g. Rainer
   Joswig) have undoubtedly a professional reputation in that area?
   Isn't your logic somewhat flawed?

>> As opposed to all the people
>>    around with no history in area X and no success?
>
> That's already insulting.

How so? Do you seriously propose that everyone posting in c.l.l and
c.l.f has a proven track record in functional programming, is
qualified to make a comparison of functional languages and should
therefore be taken on authority? If so, why doesn't it apply to Jon
Harrop (who is posting on c.l.l ...)? If not, why is it insulting to
presume that not everyone posting in c.l.[fl], especially in this
thread here, has the qualification?

You mystify me.

Regards -- Markus
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2007072701154850073-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-26 10:31:17 -0400, 
·····································@ANDTHATm-e-leypold.de (Markus 
E.L.) said:

> You mystify me.

Let's put it simply:

1. A definition of spam is "communication that is irrelevant or 
inappropriate to a given forum, often advertising a commercial product 
or service."
2. From (1) we see that:

business signature + on topic for forum = legitimate post

business signature + off topic for forum = spam


3. Jon often posts to c.l.l *only*  (i.e., without also posting to c.l.f.)
4. Jon's often posts about ocaml to c.l.l *only*.
5. Posts about ocaml, though on topic in c.l.f., are certainly off 
topic in c.l.l.
6. Since Jon posts to an inappropriate forum (posts about ocaml to 
c.l.l) with a commercial signature (i.e., advertising commercial 
products and services) his posts about ocaml to c.l.l. are spam.

In summary:

If you post on topic to any forum with your business signature it is 
not spam because your post is *on topic for that forum*. If you post 
off topic to any forum with your business signature it is *spam*.

Since his posts to c.l.l are overwhelmingly off topic, his 
participation in that forum with his business signature is spam.

To conclude:

Jon should post to c.l.f. about ocaml but he doesn't - why? Because 
comparatively few people read or post to c.l.f. and Jon's aim is to 
reach as many potential customers as possible. So Jon spams the full 
house of c.l.l. instead of posting to the appropriate, but 
comparatively empty theater of c.l.f.
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <49ejiu86lo.fsf@hod.lan.m-e-leypold.de>
> 3. Jon often posts to c.l.l *only*  (i.e., without also posting to c.l.f.)

First time I hear somebody being criticised for _not_ cross posting.

> 4. Jon's often posts about ocaml to c.l.l *only*.

You sure the word "Lisp" was totally absent from those posts? And they
were thread starters? Hm.

> Jon should post to c.l.f. about ocaml but he doesn't - why? Because
> comparatively few people read or post to c.l.f. and Jon's aim is to
> reach as many potential customers has possible. 

Don't you think the entrenched Lisp community is exactly that (customers) not?

And thanks for bringing all that to c.l.f. BTW: Does that qualify as
spam by your questionnaire? I can't decide off the top of my head.

Regards -- Markus
From: Raffael Cavallaro
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2007072800324843658-raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2007-07-27 06:48:51 -0400, 
·····································@ANDTHATm-e-leypold.de (Markus 
E.L.) said:

> And thanks for bringing all that to c.l.f. BTW: Does that qualify as
> spam by your questionnaire? I can't decide from the top off my head.

Do you see any commercial advertising in my post? Don't know about the 
top of your head, but the inside seems fairly vacant.
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <yvk5si2smo.fsf@hod.lan.m-e-leypold.de>
> On 2007-07-27 06:48:51 -0400,
> ·····································@ANDTHATm-e-leypold.de (Markus
> E.L.) said:
>
>> And thanks for bringing all that to c.l.f. BTW: Does that qualify as
>> spam by your questionnaire? I can't decide from the top off my head.
>
> Do you see any commercial advertisting in my post? Don't know about
> the top of your head, but the inside seems farily vacant.

Thanks for clearing that up. Your point (1) was 

| communication that is irrelevant or inappropriate to a given forum,
| often advertising a commercial product or service.

so, because of the "often" in this definition, I wasn't totally
sure. 

Regards -- Markus
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185524782.141965.111180@22g2000hsm.googlegroups.com>
Hi Marcus
I haven't heard that Linus Torvalds  came everyday to bother people in
newsgroups or forums specially dedicated to discuss Windows, FreeBSD &
Solaris and tells them how their OS sucks and they should switch to
Linux immediately. That's exactly what our resident spammer is doing
for a long, long time. Beside you are forgetting the economical
factor, spammer is here to sell his merchandise, he is at a his
working place right here to find every statements and read every study
that helps him achieve his goal, the other posters come only to read
something interesting and answer with whatever free time they want to
spare. So a spammer could spend days finding examples that make his
views look better, while the people who want to challenge his remarks
have only minutes. That makes his views biased; you can listen to them and
you can try them but whatever you do keep in mind that his final goal
is to sell.

regards
Slobodan
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <tnps2e6qa0.fsf@hod.lan.m-e-leypold.de>
> spare. So spammer could spent days finding examples that makes his
> views look better while the people who want to challenge his remarks
> have only minutes.

You certainly got a point here: I don't have so much time to correct
the bad manners of a certainly largish c.l.l group. So, after saying
my piece and getting the same non-sequitur answers a couple of times
("But don't you see, ... he IS a spammer/troll?") I'll have to do
something else. You people's minutes add up to hours of my time.

Regards -- Markus
From: Slobodan Blazeski
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185779316.920329.238600@w3g2000hsg.googlegroups.com>
On Jul 27, 1:26 pm, ·····································@ANDTHATm-e-
leypold.de (Markus E.L.) wrote:
> > spare. So spammer could spent days finding examples that makes his
> > views look better while the people who want to challenge his remarks
> > have only minutes.
>
> You certainly got a point here: I don't have so much time to correct
> the bad manners of a certainly largish c.l.l group. So, after saying
> my piece and getting the same non-sequitur answers a couple of times
> ("But don't you see, ... he IS a spammer/troll?") I'll have to do
> something else. You people's minutes add up to hours of my time.
>
> Regards -- Markus

At first I thought you were a troll, now I'm sure you are. Have a nice
life I won't read your posts anymore.
From: Tony Finch
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <fRE*tNtQr@news.chiark.greenend.org.uk>
Jon Harrop <···@ffconsultancy.com> wrote:
>
>Lisp's verbosity stems primarily from its use of whitespace and parentheses
>as well as a lack of pattern matching. You can see this in almost any
>comparable programs written in the two languages (or any languages with the
>similar features, e.g. Haskell vs Scheme). Look at the intersect routines
>from my ray tracer.

I think the comparison would be fairer if you used the same variable names
in each language. You've used words in the Lisp and single letters in the
O'Caml, which undermines your argument. You have also wasted vertical
space in the Lisp, and maximized vertical compression in the O'Caml,
in both cases more than I would say is normal.

Tony.
-- 
f.a.n.finch  <···@dotat.at>  http://dotat.at/
PORTLAND PLYMOUTH: NORTHWESTERLY BACKING SOUTHWESTERLY 4 OR 5, INCREASING 6 OR
7 FOR A TIME. SLIGHT OR MODERATE, OCCASIONALLY ROUGH. RAIN OR THUNDERY
SHOWERS. MODERATE OR GOOD.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f84tm8$gdd$1@online.de>
Tony Finch schrieb:
> I think the comparison would be fairer if you used the same variable names
> in each language. You've used words in the Lisp and single letters in the
> O'Caml, which undermines your argument.

Only partly.
In shorter code, fully descriptive names aren't as relevant since you 
have fewer lines to check for cross-referencing code.
I see this name terseness elsewhere in functional code, including code 
from people who are generally considered as "writing good style". (Take 
a look at the Haskell Prelude, for example.)

Regards,
Jo
From: Markus E Leypold
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <qv1wex7v7q.fsf@hod.lan.m-e-leypold.de>
> Tony Finch schrieb:
>> I think the comparison would be fairer if you used the same variable names
>> in each language. You've used words in the Lisp and single letters in the
>> O'Caml, which undermines your argument.
>
> Only partly.
> In shorter code, fully descriptive names aren't as relevant since you

Rule of thumb for me: The larger the scope the more descriptive the
variable name should be. Actually modify that by how far away the
corresponding type definitions have been made:

  let f l = ...  (* bad *)

  let f customer_list = ... (* better *)

  let f (customers : customer list) = ...  (* best *)


But 

  let f (customers ...) =

     ...

     let process cs ... =
         ...

     in 

         process customers ... ;;


because all information is there locally to derive what 'cs' is.

> have fewer lines to check for cross-referencing code.
> I see these name terseness elsewhere in functional code, including
> code from people who are generally considered as "writing good
> style". (Take a look at the Haskell Prelude, for example.)

:-).

I think this is a bit like naming conventions in physics: Even in
printed texts, it's always V that is used for voltage. In the same
spirit I use n,k,i for integers and l (or sometimes haskell style 'xs'
or 'as') for (local) variables. Of course code becomes less
understandable for other people this way, but that's what types are
for (so I try to choose descriptive type names and then l, being of
type 'customer list' is immediately understandable again).

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a726ad$0$1614$ed2619ec@ptn-nntp-reader02.plus.net>
Markus E Leypold wrote:
> Rule of thumb for me: The larger the scope the more descriptive the
> variable name should be. Actually modify that by how far away the
> corresponding type definitions have been made:
> 
>   let f l = ...  (* bad *)
> 
>   let f customer_list = ... (* better *)
> 
>   let f (customers : customer list) = ...  (* best *)

Yes. I agree in spirit: longer-lived values deserve more descriptive names
in any language.

> But
> 
>   let f (customers ...) =
> 
>      ...
> 
>      let process cs ... =
>          ...
> 
>      in
> 
>          process customers ... ;;
> 
> because all information is there locally to derive what 'cs' is.

This might be considered bad style in ML where it is preferable to write
many small, separate functions rather than nesting definitions. Hiding is
provided by module signatures.

For example, the "length" function from the OCaml standard library:

let rec length_aux len = function
    [] -> len
  | a::l -> length_aux (len + 1) l

let length l = length_aux 0 l

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Ulf Wiger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <xcz7ioqkiyg.fsf@cbe.ericsson.se>
>>>>> "Jon" == Jon Harrop <···@ffconsultancy.com> writes:

  Jon> Joachim Durchholz wrote:
  >>  There are two answers to that:
  >>
  >> 1. Coding doesn't take longer,

  Jon> Even if there is four times as much code?

  >>  but you can't place the same amount of code on a screenful, so
  >>  debugging and maintenance will take longer.

  Jon> Yes. I would expect that to result in superlinear degredation
  Jon> of development speed with respect to LOC.

I've been quoted as stating that development speed in terms
of lines of code per man-hour* seems to be the same regardless
of language, and that LOC reduction should result in roughly
linear improvement in development speed. We've noted that the
same seems to be true for fault density**, with corresponding 
effects on product quality. This would seem to support your
assumption about a superlinear difference overall 

*  Development + testing up until product release
** Faults found in the field, measured in faults/KLOC

I think a long-term effect of relieving the programmer
of concern for low-level memory management, locking,
etc. will eventually allow programmers to adjust their
frame of mind and ways of working (e.g. smaller projects,
less bureaucracy, perhaps fewer and better programmers***),
will give additional factors of productivity improvement,
but this is mainly speculation, which BTW would seem to 
invalidate my initial assumption, but support yours. ;-)

*** While one should really have top-notch programmers to
    get away with C++ programming in the large, the need 
    for large projects tends to drive away the best 
    programmers.

BR,
Ulf W
-- 
Ulf Wiger, Senior Specialist,
   / / /   Architecture & Design of Carrier-Class Software
  / / /    Team Leader, Software Characteristics
 / / /     Ericsson AB, IMS Gateways
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xabtmli37.fsf@ruckus.brouhaha.com>
Ulf Wiger <·······@cbe.ericsson.se> writes:
> I've been quoted as stating that development speed in terms
> of lines of code per man-hour* seems to be the same regardless
> of language, and that LOC reduction should result in roughly
> linear improvement in development speed. 

I wonder about this.  My hat off to anyone who can code in Haskell as
fast in LOC/hour as they can code in Java.  Of course the Java LOC
only do 1/10th as much, so coding 2x slower still leaves one ahead by
a factor of 5.
From: Ulf Wiger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <xczbqe2s5ib.fsf@cbe.ericsson.se>
>>>>> "Paul" == Paul Rubin <·············@NOSPAM.invalid> writes:

  Paul> Ulf Wiger <·······@cbe.ericsson.se> writes:
  >>  I've been quoted as stating that development speed in terms of
  >>  lines of code per man-hour* seems to be the same regardless of
  >>  language, and that LOC reduction should result in roughly linear
  >>  improvement in development speed.

  Paul> I wonder about this.  My hat off to anyone who can code in
  Paul> Haskell as fast in LOC/hour as they can code in Java.  Of
  Paul> course the Java LOC only do 1/10th as much, so coding 2x
  Paul> slower still leaves one ahead by a factor of 5.

I believe this is basically the point. For a sufficiently 
difficult problem, understanding how to build the program
takes much longer than writing it down. The speed of actually 
writing the code will depend on the ratio of trivial vs tricky
code. When trying to understand what you wrote, the code that is 
secondary to solving the actual problem will get in your way and
slow you down.

Our frame of reference was very large projects, where the actual
writing of code is a fairly small part of the overall project
time. And it's just a superficial observation, from comparing
actual metrics from several projects using different technologies.
It's only a little bit more solid than "a watched pot never boils."

The observation itself is not particularly novel. Brooks reported
someone as drawing the same conclusion in The Mythical Man-Month
(I don't have a copy of the book, so I can't check the reference).
We checked the numbers for a few of our projects and observed that
they seemed to corroborate Brooks' old rule of thumb, QED. (:


BR,
Ulf W
-- 
Ulf Wiger, Senior Specialist,
   / / /   Architecture & Design of Carrier-Class Software
  / / /    Team Leader, Software Characteristics
 / / /     Ericsson AB, IMS Gateways
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a5cf24$0$1629$ed2619ec@ptn-nntp-reader02.plus.net>
Cesar Rabak wrote:
> Jon Harrop escreveu:
>> Pattern matching is the single biggest advantage and is the main reason
>> why OCaml, SML, Haskell and F# are all much more concise than Common
>> Lisp. Look at the amount of code doing destructing in the above examples.
> 
> Humm... I still find your comparison loaded: you rule out the use of
> libraries for pattern matching in Lisp. Why?

Pattern matching in Lisp was in no way prohibited or even discouraged in any
of these examples. The vast majority of Lisp programmers never reach for
more sophisticated functionality, opting instead to hand code everything in
a hopelessly unmaintainable way. See Andre Thieme or Nathan Froyd's
implementations:

  http://www.lambdassociates.org/studies/study10.htm

Some people did try to use pattern matching from Lisp but it remained
uncompetitive:

Dan Bensen Greenspunned by lashing up an ad-hoc bug-ridden
informally-specified implementation of half of an ML pattern matcher for
his symbolic simplifier. His implementation remains longer and slower than
the OCaml.

Mark Tarver went the extra mile and put years of effort into making the most
sophisticated replica of modern functional programming languages for Lisp.
As you can see from these results, the Qi implementations remains slower
and longer than the OCaml.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E Leypold
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <l6ps2iqd9a.fsf@hod.lan.m-e-leypold.de>
> Cesar Rabak wrote:
>> Jon Harrop escreveu:
>>> Pattern matching is the single biggest advantage and is the main reason
>>> why OCaml, SML, Haskell and F# are all much more concise than Common
>>> Lisp. Look at the amount of code doing destructing in the above examples.
>> 
>> Humm... I still find your comparison loaded: you rule out the use of
>> libraries for pattern matching in Lisp. Why?
>
> Pattern matching in Lisp was in no way prohibited or even discouraged in any
> of these examples. The vast majority of Lisp programmers never reach for
> more sophisticated functionality, opting instead to hand code everything in
> a hopelessly unmaintainable way. See Andre Thieme or Nathan Froyd's
> implementations:
>
>   http://www.lambdassociates.org/studies/study10.htm
>
> Some people did try to use pattern matching from Lisp but it remained
> uncompetitive:
>
> Dan Bensen Greenspunned by lashing up an ad-hoc bug-ridden
> informally-specified implementation of half of an ML pattern matcher for
> his symbolic simplifier. His implementation remains longer and slower than
> the OCaml.
>
> Mark Tarver went the extra mile and put years of effort into making the most
> sophisticated replica of modern functional programming languages for Lisp.
> As you can see from these results, the Qi implementations remains slower
> and longer than the OCaml.

Considering that Qi is more than a " replica of modern functional
programming languages" but rather (as I understood it) a framework to
define type systems, I think that last paragraph is somewhat unfair.

Regards -- Markus
From: Boris Borcic
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46b9944b$1_3@news.bluewin.ch>
Jon Harrop wrote:
> 
>   http://www.lambdassociates.org/studies/study10.htm
> 

[...]

> As you can see from these results, the Qi implementations remains slower
> and longer than the OCaml.

Makes one wonder how you measure length...

         OCaml     Qi     ratio

LOCs       15     15      1.00
chars     546    387      0.71
tokens    224    150      0.67

And I'd say few could deny that the Qi code is quite easier to read.

Cheers,

Boris Borcic
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f84t2o$fkn$1@online.de>
Jon Harrop schrieb:
> Joachim Durchholz wrote:
>> There are two answers to that:
>>
>> 1. Coding doesn't take longer,
> 
> Even if there is four times as much code?

Not enough of a difference to get above the noise level.
I spend 90% of my programming time designing things. (The other 90% are 
debugging and maintenance, of course *g*)

I also happen to be a rather adept blind typist, so I wouldn't even 
*think* about the parentheses.

>> 2. You can always count nodes in the AST instead of lines of code.
> 
> Lisp's verbosity stems primarily from its use of whitespace and parentheses
> as well as a lack of pattern matching.

Ah, right, lack of pattern matching tends to bloat any code.
I didn't notice this since the example code given didn't have any case 
distinctions.

I'm still surprised it isn't already in widespread use in any new 
language. I've been seriously missing that in any language that I've 
been using since I got to know the concept (and even before).

 > You can see this in almost any
> comparable programs written in the two languages (or any languages with the
> similar features, e.g. Haskell vs Scheme). Look at the intersect routines
> from my ray tracer. First the Lisp:
> 
> (defun intersect (orig dir scene)
>   (labels ((aux (lam normal scene)
>              (let* ((center (sphere-center scene))
>                     (lamt (ray-sphere orig
>                                       dir
>                                       center
>                                       (sphere-radius scene))))
>                (if (>= lamt lam)
>                    (values lam normal)
>                    (etypecase scene
>                      (group
>                       (dolist (kid (group-children scene))
>                         (setf (values lam normal)
>                               (aux lam normal kid)))
>                       (values lam normal))
>                      (sphere
>                       (values lamt (unitise
>                                     (-v (+v orig (*v lamt dir)) center)))))))))
>     (aux infinity zero scene)))
> 
> Then the OCaml:
> 
> let rec intersect o d (l, _ as hit) (c, r, s) =
>   let l' = ray_sphere o d c s in
>   if l' >= l then hit else match s with
>     [] -> l', unitise (o +| l' *| d -| c)
>   | ss -> List.fold_left (intersect o d) hit ss

I think the Lisp would look better if it weren't indented so much.
It might even get slightly shorter because some stuff could be written 
inline.
(I can't say whether the above Lisp could be written more concisely though.)

>> For 
>> the above example, you'd end up at roughly the same figures for Lisp and
>> your generic FPL, but as soon as you declare macros in Lisp, the FPL
>> needs less nodes.
> 
> Are you saying that macros reduce code size?

First, I'm (wrongly) assuming that Lisp's relative verboseness comes 
from parentheses (and additional whitespace needed because those 
parentheses force you into more line wraps).

Second, I'm saying that needing macros means that you often write the 
same semantics twice, once as a function and once (for those cases where 
it's a useful optimization) as a macro.
So a language that works well without macros is shorter.
(I assume that macros are more a library thing, not something that gets 
written routinely or redundantly, so I don't think the effect is large.)

So, no, I'm saying that needing macros tends to increase code size, 
though I can't say it would be much and suspect it isn't much indeed.

Regards,
Jo
From: Andy Freeman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185295264.670456.310620@d30g2000prg.googlegroups.com>
On Jul 24, 5:59 am, Joachim Durchholz <····@durchholz.org> wrote:
> Seconds, I'm saying that needing macros means that you often writen the
> same semantics twice, once as a function and once (for those cases where
> it's a useful optimization) as a macro.

(1) One doesn't "need" macros any more than one "needs" iteration
statements - both are merely more convenient than the alternatives in
certain situations.
(2) "often"?  I almost never write something as both a macro and a
function.  In fact, the whole point of writing something as a macro is
usually that there's no reasonable way to write the semantics as a
function.

For example, consider python 2.6's "with" statement.  There's no clean
way to write it as a function, so pre 2.6 code basically repeats
with's definition everywhere the functionality is used.

-andy
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f85pb4$e3h$1@registered.motzarella.org>
Andy Freeman schrieb:
> On Jul 24, 5:59 am, Joachim Durchholz <····@durchholz.org> wrote:
>> Seconds, I'm saying that needing macros means that you often writen the
>> same semantics twice, once as a function and once (for those cases where
>> it's a useful optimization) as a macro.
> 
> (1) One doesn't "need" macros any more than one "needs" iteration
> statments - both are merely more convenient than the alternatives in
> certain situations.
> (2) "often"?  I almost never write something as both a macro and a
> function.  In fact, the whole point of writing something as a macro is
> usually that there's no reasonable way to write the semantics as a
> function.
> 
> For example, consider python 2.6's "with" statement.  There's no clean
> way to write it as a function, so pre 2.6 code basically repeats
> with's definition everywhere the functionality is used.

No clean way to do it in Python.
In Lisp and Haskell one could get such a "statement" in a clean way.


André
-- 
From: Markus E Leypold
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <3z8x957vju.fsf@hod.lan.m-e-leypold.de>
> Ah, right, lack of pattern matching tends to bloat any code.
> I didn't notice this since the example code given didn't have any case
> distinctions.
>
> I'm still surprised it isn't already in widespread use in any new
> language. I've been seriously missing that in any language that I've
> been using since I got to know the concept (and even before).

I completely agree with that. Not only that the code becomes longer,
but I think the destructuring of values becomes easier to see than in
multiple nested conditional statements and explicit selector functions
in the branches.

This is no statement against Lisp or Scheme, but just compare

  match l with 
      []     -> ... 
    | h::t   -> ...

with (something like)

(if (is_empty l) 
    ( ... )
    ( ... (car l) ... (cdr l) ... ))

and IMHO it becomes much worse if there are more than 2 cases.

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a6205f$0$1605$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> Jon Harrop schrieb:
>> Even if there is four times as much code?
> 
> Not enough of a difference to get above the noise level.

No way. You really wouldn't rather inherit 100kLOC of code that does the
same thing as 400kLOC of code?

>>> 2. You can always count nodes in the AST instead of lines of code.
>> 
>> Lisp's verbosity stems primarily from its use of whitespace and
>> parentheses as well as a lack of pattern matching.
> 
> Ah, right, lack of pattern matching tends to bloat any code.
> I didn't notice this since the example code given didn't have any case
> distinctions.

Yes. An important point here is the fact that pattern matching is used
ubiquitously for decomposition as well as dynamic dispatch when available.

> I'm still surprised it isn't already in widespread use in any new
> language. I've been seriously missing that in any language that I've
> been using since I got to know the concept (and even before).

Indeed.

> ...
> I think the Lisp would look better if it weren'd indented so much.

I like LOC because I think it is absurd to split a trivial if expression
over several lines.

>>> For
>>> the above example, you'd end up at roughly the same figures for Lisp and
>>> your generic FPL, but as soon as you declare macros in Lisp, the FPL
>>> needs less nodes.
>> 
>> Are you saying that macros reduce code size?
> 
> First, I'm (wrongly) assuming that Lisp's relative verboseness comes
> from parentheses (and additional whitespace needed because those
> parentheses force you into more line wraps).

Yes. I think that is another big effect.

> Seconds, I'm saying that needing macros means that you often writen the
> same semantics twice, once as a function and once (for those cases where
> it's a useful optimization) as a macro.
> So a language that works well without macros is shorter.
> (I assume that macros are more a library thing, not something that gets
> written routinely or redundantly, so I don't think the effect is large.)
> 
> So, no, I'm saying that needing macros tends to increase code size,
> though I can't say it would be much and suspect it isn't much indeed.

Yes. I think macros can be nice for aesthetics but they are easily overused
and abused for things like optimization.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f85al5$so$1@online.de>
Jon Harrop schrieb:
> Joachim Durchholz wrote:
>> Jon Harrop schrieb:
>>> Even if there is four times as much code?
>> Not enough of a difference to get above the noise level.
> 
> No way. You really wouldn't rather inherit 100kLOC of code that does the
> same thing as 400kLOC of code?

Frankly, I wouldn't care whether the imported code is 100kLoC or 
400kLoC. I'd care more whether it's stable and has a healthy community.

The situation wasn't about importing code anyway, it was about writing 
it. And for that, a factor-4 blowup isn't exactly what I'd like, but 
it's definitely not a show stopper. Not if the AST is the same (I'd moan 
about it, of course, but I've been writing 250kB of PHP without too much 
pain in the past two years, and PHP is worse than anything we've been 
discussing here!)

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a62f63$0$1595$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> Frankly, I wouldn't care whether the imported code is 100kLoC or
> 400kLoC. I'd care more whether it's stable and has a healthy community.

All other things being equal.

> The situation wasn't about importing code anyway, it was about writing
> it. And for that, a factor-4 blowup isn't exactly what I'd like, but
> it's definitely not a show stopper. Not if the AST is the same (I'd moan
> about it, of course, but I've been writing 250kB of PHP without too much
> pain in the past two years, and PHP is worse than anything we've been
> discussing here!)

The ASTs are wildly different here. I wasn't referring to syntactic
differences. The Lispers compiled their pattern matches by hand and they
would have to maintain that by hand. Similarly, Java programmers would code
lots of unnecessary classes and inheritance hierarchies by hand and would
have to maintain those.

The differences in verbosity are more than skin deep...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f84tt5$gdd$2@online.de>
Cesar Rabak schrieb:
> Humm... I still find your comparison loaded: you rule out the use of 
> libraries for pattern matching in Lisp. Why?

My answer would be:

Using a pattern matching library adds yet another dependency to your 
code. Unless you know that the library is well-maintained, you don't 
want this kind of dependency.

In other words, I suspect it would find lots of use it if became part of 
the CLOS standard.

Regards,
Jo
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <Suppi.21026$j7.379560@news.indigo.ie>
Joachim Durchholz wrote:

> In other words, I suspect it would find lots of use it if became part
> of the CLOS standard.
> 

You say "it", but pattern matching exists in quite a number of forms.
Should some cut-down one-sided matcher be standardised, very vaguely
akin to a decision to standardise on single-dispatch OO?  Should
matching be only on typed data (and thus should the Lisp type system be
beefed up before hand?), or should matching/unification be over more
arbitrary structure? Should there be some sort of a "meta match
protocol"? (for experimenting with controlled extension for matching
shape in mobile processes. Or something.)

Right now, there are several pattern matching or unification
libraries for lisp (sometimes as a relatively minor part of a larger
project). With different tradeoffs and powers.  The situation may well
be loosely analogous to some pre-clos-standardisation state of lisp
object systems.  You might say "pick one and move on". But I reckon
it's just too early.















 
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a62700$0$1625$ed2619ec@ptn-nntp-reader02.plus.net>
David Golden wrote:
> You say "it", but pattern matching exists in quite a number of forms.
> Should some cut-down one-sided matcher be standardised, very vaguely
> akin to a decision to standardise on single-dispatch OO?  Should
> matching be only on typed data (and thus should the Lisp type system be
> beefed up before hand?), or should matching/unification be over more
> arbitrary structure? Should there be some sort of a "meta match
> protocol"? (for experimenting with controlled extension for matching
> shape in mobile processes. Or something.)
> 
> Right now, there are several pattern matching or unification
> libraries for lisp (sometimes as a relatively minor part of a larger
> project). With different tradeoffs and powers.  The situation may well
> be loosely analogous to some pre-clos-standardisation state of lisp
> object systems.  You might say "pick one and move on". But I reckon
> it's just too early.

You could just copy F#.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f85adr$ht$1@online.de>
David Golden schrieb:
> Joachim Durchholz wrote:
> 
>> In other words, I suspect it would find lots of use it if became part
>> of the CLOS standard.
>>
> 
> You say "it", but pattern matching exists in quite a number of forms.
> Should some cut-down one-sided matcher be standardised, very vaguely
> akin to a decision to standardise on single-dispatch OO?  Should
> matching be only on typed data (and thus should the Lisp type system be
> beefed up before hand?), or should matching/unification be over more
> arbitrary structure? Should there be some sort of a "meta match
> protocol"? (for experimenting with controlled extension for matching
> shape in mobile processes. Or something.)

Oh, the vagaries of too much design space!

How about a library that leaves all these design options open for 
Lispers to explore? I always thought you can do anything in Lisp, no?

> Right now, there are several pattern matching or unification
> libraries for lisp (sometimes as a relatively minor part of a larger
> project). With different tradeoffs and powers.

I have looked at one or two of them, and what I found was more a 
trade-off of available developer time vs. "get it right in the first place".
Which is, I think, one of the reasons such libraries aren't in 
widespread use outside of the project that they were initially written for.

 > The situation may well
> be loosely analogous to some pre-clos-standardisation state of lisp
> object systems.  You might say "pick one and move on". But I reckon
> it's just too early.

Well, maybe.
It still seems to be part of a large NIH syndrome. Lisp isn't the only 
language that fails to pick up this lead. (Well, maybe Perl 6... though 
it will have to name it differently than "pattern matching", because all 
those Perl golfers out there will immediately misunderstand the term.)

Regards,
Jo
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <Ujrpi.21028$j7.379591@news.indigo.ie>
Joachim Durchholz wrote:

> How about a library that leaves all these design options open for
> Lispers to explore?

That's what I was getting at with "meta match protocol", you might
have to be familiar with CLOS jargon to recognise that, granted.
(the meta object protocol allowing for the perversion of CLOS in a
controlled and portable fashion...)

> Well, maybe.
> It still seems to be part of a large NIH syndrome. 

Hmm. if anything, I'd say the lisp community was a pretty early adopter
of various matching functionalities (think of all the embedded
prologoids smushed into lisp apps over the years for starters).
(N.B. don't mistake  lack-of/facetious/hostile response to Harrop in
particular as disdain for all pattern matching and/or static typing,
Harrop has a history...)

There may be synergistic benefits to pattern matching in combination
with a more powerful type system, and I wouldn't really argue against
someone claiming that the lisp type system may need some beefing up. 
But writing a standard then doing something is usually the wrong order
anyway, best implement something (probably more than once...), then
propose a standard.

But getting everyone to agree on something, especially just when you
have things like Qi coming along and demonstrating that the tradeoffs
ML and Haskell made for their type systems aren't necessarily the
tradeoffs lispers might like [1]... ouch...

[1] http://www.lambdassociates.org/advtypes.htm
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a70bfe$0$1589$ed2619ec@ptn-nntp-reader02.plus.net>
David Golden wrote:
> There may be synergistic benefits to pattern matching in combination
> with a more powerful type system, and I wouldn't really argue against
> someone claiming that the lisp type system may need some beefing up.

The pattern matching in Haskell and ML swings on closed sum types which,
AFAIK, don't exist in Lisp. This is the main reason why ML-style pattern
matching isn't useful in Lisp.

> But writing a standard then doing something is usually the wrong order
> anyway, best implement something (probably more than once...), then
> propose a standard.
> 
> But getting everyone to agree on something, especially just when you
> have things like Qi coming along and demonstrating that the tradeoffs
> ML and Haskell made for their type systems aren't necessarily the
> tradeoffs lispers might like [1]... ouch...

You might want C++'s type system, yes.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <arHpi.21032$j7.379485@news.indigo.ie>
> The pattern matching in Haskell and ML swings on closed sum types
> which, AFAIK, don't exist in Lisp. This is the main reason why
> ML-style pattern matching isn't useful in Lisp. 

No shit (though there are techniques that come closer than things we've
seen here. No, I really can't be bothered going into more detail)

While it turns out a post [1] where I touch on that is invisible in
google groups due to some sort of hole in google's memory around the
17th, so charitably I guess you might have missed it too in whatever
the screw-up was, I had assumed you knew I and others knew that and
were just being your usual obnoxious self with later replies to certain
other posts, in keeping with a probable strategy to prolong threads and
afford more opportunities to spam.

> You might want C++'s type system, yes.

Qi's type system is turing-complete, yes. However, to assert it's the
therefore "the same" as the twisted horror of C++ would be silly.  I
assume you're not doing that.

[1]
http://coding.derkeiler.com/Archive/Lisp/comp.lang.lisp/2007-07/msg01113.html
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a79662$0$1598$ed2619ec@ptn-nntp-reader02.plus.net>
David Golden wrote:
>> You might want C++'s type system, yes.
> 
> Qi's type system is turing-complete, yes. However, to assert it's the
> therefore "the same" as the twisted horror of C++ would be silly.  I
> assume you're not doing that.

That's exactly what I'm doing. If you want to know why undecidability is
bad, read the literature on ML (it'll be in the history section now).

Qi has the world's most powerful type system. So does C++...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <Y9Npi.21040$j7.379595@news.indigo.ie>
> That's exactly what I'm doing.

Well, that is silly.  Didn't we hear enough of that in... 2005?
http://www.lambdassociates.org/studies/study03.htm
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a7ca66$0$1619$ed2619ec@ptn-nntp-reader02.plus.net>
David Golden wrote:
>> That's exactly what I'm doing.
> 
> Well, that is silly.

The C++ and Qi type systems provide exactly the same capabilities. That is a
useful and objective classification.

Incidentally, this debate was covered in detail at the advent of ML 30 years
ago, long before Qi existed. ML chose a path. Haskell chose a similar path.
OCaml also. Qi chose to copy C++ instead.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: David Golden
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <bOVpi.21047$j7.379504@news.indigo.ie>
Jonnie Boy wrote:
> The C++ and Qi type systems provide exactly the same capabilities.
> That is a useful and objective classification. 

About as useful as more usual arguments from programming language turing 
completeness at run-time rather than some aspect
(type/template/macro/whatever) of a compile-time.  Yes, it
distinguishes Qi from ML.  No, it doesn't say much about which of Qi or
C++ is nicer.  Run-time turing-completeness doesn't say much about
whether Ruby or Intercal is nicer.

> Incidentally, this debate was covered in detail at the advent of ML 30
> years ago, long before Qi existed. 

That just serves to highlight that your retreading and retreading of the
same old debates ad nauseam in a multitude of fora is a quite
deliberate decision on your part, presumably in order to spam (Nor is
ocaml some great answer to all debate, it's just a particular set of
tradeoffs aligned with some people's preferences)

> Qi chose to copy C++ instead 

Much like Python "chose to copy" fortran's turing completeness at run
time when it could have just copied regexes?  Given the goals and
nongoals of Qi your comment is a tad vacuous at best.

What little point you are presumably dragging out making, in order to
turn one spam opportunity into many, and it's one that's already been
made quite a lot, including extremely recently - that one might opt for
a more limiting type system to ensure some desired static decidability
property - should have been taken to be understood and covered by the
just-supplied link to Mark Tarver's
http://www.lambdassociates.org/studies/study03.htm 

Yes, I promise to really killfile Harrop now and stop helping him spam:
http://groups.google.com/group/comp.lang.lisp/msg/7f66e71bdbcf0ffc
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <ubk5snld4n.fsf@hod.lan.m-e-leypold.de>
BTW: You're F'upping to alt.killfile again. It's this kind of one-way
communication that in my eyes defines a spammer.

>
>> Incidentally, this debate was covered in detail at the advent of ML 30
>> years ago, long before Qi existed. 
>
> That just serves to highlight that your retreading and retreading of the
> same old debates ad nauseum in a multitude of fora is a quite
> deliberate decision on your part, presumably in order to spam (Nor is
> ocaml some great answer to all debate, it's just a particular set of
> tradeoffs aligned with some people's preferences)

Bah - nonsense. It serves to show that Jon has done his research
whereas other people haven't so insist on leading the same discussions
again and again (those who don't know their history are damned to
repeat it).

Regards -- Markus
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f83dfm$749$1@registered.motzarella.org>
Joachim Durchholz schrieb:
> André Thieme schrieb:
>> Counting lines makes not much sense for Lisp. Although it supports all
>> these programming paradigms it has a very unique style which will blow
>> up the LOC count in several cases. But from this it doesn't follow, that
>> coding takes longer.
>>
>> This one liner:  (defun make-accumulator (n) (lambda (i) (incf n i)))
>> gets usually written in three visible lines:
>> (defun make-accumulator (n)
>>   (lambda (i)
>>     (incf n i)))
> 
> There are two answers to that:
> 
> 1. Coding doesn't take longer, but you can't place the same amount of 
> code on a screenful, so debugging and maintenance will take longer.

This might be true for several cases.
I also see the scenario in mind where longer names for variables or
functions bring in advantages for readability.
However, if you wanted then you could compress code in Lisp as well.
For example with my DEF:
(def make-accumulator (n) [incf n])


> Note that your typical generic FPL not only fits on a line, it even 
> takes less of a line; the syntactic Haskell equivalent of the above 
> example would look like this:

Well, for some situations you are right, for others you are not.
It depends on what you want to do.
I gave the example about calculating the powerset.
One other Haskell solution is for example this one:
powerset = filterM (const [True, False])

To do the same in Lisp you first need to write like 40 LOC preparation
code.

In other situations Lisp can be the better option.
Joel Raymond wrote about Erlangs advantages over Haskell:
http://wagerlabs.com/2006/1/1/haskell-vs-erlang-reloaded/
For example point 3.2 was talking about static typing and the extra
code it needed.


>   make-accumulator N = incf N

Hmm, does Haskell have incf?
Is that some monad that can do the destructive manipulation on N?
What is the minus (between "make" and "accumulator") doing when it is
placed on the right side of a "="?


> (No, Haskell isn't cheating, it simply doesn't have or need macros and 
> quoting, so it can encode the same code with far less symbols.)

It is true that the need for Macros in Haskell is reduced. This comes
from implicit currying but mainly from lazyness.
In Lisp you could do the same. You could teach Lisp implicit currying
and also implicit laziness (although I think that it would feel really
bad).
So yes, several things that are expressed in Lisp with Macros would be
expressed without them in Haskell. However, I am not sure if that comes
without performance hits.


> Now that's 27 instead of 52 characters, which means I can put nearly 
> double the code on a single line without cramming it.

With short code examples it will probably often happen.



> 2. You can always count nodes in the AST instead of lines of code. For 
> the above example, you'd end up at roughly the same figures for Lisp and 
> your generic FPL, but as soon as you declare macros in Lisp, the FPL 
> needs less nodes.

This might be a little misunderstanding.
Usually after introducing macros the count of nodes is reduced.


André
-- 
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7x4pjuf9w5.fsf@ruckus.brouhaha.com>
Andr� Thieme <······························@justmail.de> writes:
> In Lisp you could do the same. You could teach Lisp implicit currying
> and also implicit lazyness (altough I think that it would feel really bad).

You can't do anything of the sort.  Consider that Haskell has no
language support for exceptions or continuations, because they're
implemented straightforwardly as library modules, using a combination
of lazy evaluation and currying.  They can't be implemented that way
in Lisp, so exceptions are built into the language and continuations
(in CL) can't be done at all without nonstandard extensions.
From: Neelakantan Krishnaswami
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <slrnfac3i0.p8a.neelk@gs3106.sp.cs.cmu.edu>
In article <<··············@ruckus.brouhaha.com>>,
Paul Rubin <> wrote:
> 
> You can't do anything of the sort.  Consider that Haskell has no
> language support for exceptions or continuations, because they're
> implemented straightforwardly as library modules, using a
> combination of lazy evaluation and currying.  They can't be
> implemented that way in Lisp, so exceptions are built into the
> language and continuations (in CL) can't be done at all without
> nonstandard extensions.

Continuations can definitely be implemented in Lisp to the same extent
that they are in Haskell. What Haskell's continuation module provides
is a nice, strongly-typed interface to code in continuation passing
style. It does *not* let you capture arbitrary continuations, the way
you can in Scheme.

Being able to conveniently program in CPS is a good thing, IMO, but a)
I certainly wouldn't call it full support for continuations, and b)
you can program in CPS in any Lisp with tail call optimization.

-- 
Neel R. Krishnaswami
·····@cs.cmu.edu
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a6220d$0$1621$ed2619ec@ptn-nntp-reader02.plus.net>
Neelakantan Krishnaswami wrote:
> Being able to conveniently program in CPS is a good thing, IMO,

Depends what the trade-off is, IMHO.

> but a) 
> I certainly wouldn't call it full support for continuations, and b)
> you can program in CPS in any Lisp with tail call optimization.

Sure. But Lisp doesn't require tail calls and does require dynamic binding
which undermines tail calls so few Lisps actually do tail calls and there
is no standard.

  http://www.cliki.net/Tail%20Recursion

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5gmmt4F3e7h4vU1@mid.individual.net>
Jon Harrop wrote:

> Lisp doesn't require tail calls and does require dynamic binding
> which undermines tail calls so few Lisps actually do tail calls and there
> is no standard.

Most Common Lisp implementations support tail call optimization in one 
form or the other. It is true that there is no standard way to ensure 
this, but saying that few Lisps do this is definitely wrong.

Dynamically scoped variables don't undermine tail calls either.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5gmlltF3h66psU1@mid.individual.net>
Paul Rubin wrote:
> Andr� Thieme <······························@justmail.de> writes:
>> In Lisp you could do the same. You could teach Lisp implicit currying
>> and also implicit lazyness (altough I think that it would feel really bad).
> 
> You can't do anything of the sort.  Consider that Haskell has no
> language support for exceptions or continuations, because they're
> implemented straightforwardly as library modules, using a combination
> of lazy evaluation and currying.  They can't be implemented that way
> in Lisp, so exceptions are built into the language

No, exceptions can be implemented in a Common Lisp without exceptions 
already built in. For example, see 
http://www.nhplace.com/kent/CL/Revision-18.lisp

They are built into ANSI Common Lisp because (a) ANSI Common Lisp 
doesn't make a distinction between built-in and library functionality 
and (b) it's important that the same condition system is used throughout 
your program, so that libraries can more easily react to exceptions from 
other libraries.


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a6250c$0$1588$ed2619ec@ptn-nntp-reader02.plus.net>
Pascal Costanza wrote:
> No, exceptions can be implemented in a Common Lisp without exceptions
> already built in. For example, see
> http://www.nhplace.com/kent/CL/Revision-18.lisp

Turing argument.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5gmmutF3e7h4vU2@mid.individual.net>
Jon Harrop wrote:
> Pascal Costanza wrote:
>> No, exceptions can be implemented in a Common Lisp without exceptions
>> already built in. For example, see
>> http://www.nhplace.com/kent/CL/Revision-18.lisp
> 
> Turing argument.

Quoted out of context.

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <5grvf5F3hrojjU1@mid.individual.net>
Pascal Costanza wrote:
> Jon Harrop wrote:
>> Pascal Costanza wrote:
>>> No, exceptions can be implemented in a Common Lisp without exceptions
>>> already built in. For example, see
>>> http://www.nhplace.com/kent/CL/Revision-18.lisp
>>
>> Turing argument.
> 
> Quoted out of context.

Jon, I didn't get the intent of your post the first time around, and I 
apologize for my knee-jerk reaction.

Here is a more precise answer: For a good exception handling system, you 
don't need full continuations. One-shot escaping continuations + 
unwind-protect / try-finally / dynamic-wind, or the like, are 
sufficient, and core Common Lisp has both. You don't need to implement 
an interpreter for a different language that has continuations for 
implementing exception handling in core Common Lisp, and this is what 
the implementation behind the link above shows. So this has nothing to 
do with Turing equivalence - we have a true embedding of exception 
handling here.

So how are the benchmarks for my last version of the minim interpreter 
coming along?


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a8cf7e$0$1611$ed2619ec@ptn-nntp-reader02.plus.net>
Pascal Costanza wrote:
>>> Turing argument.
>> 
>> Quoted out of context.
> 
> Jon, I didn't get the intent of your post the first time around, and I
> apologize for my knee-jerk reaction.

No worries.

> Here is a more precise answer: For a good exception handling system, you
> don't need full continuations. One-shot escaping continuations +
> unwind-protect / try-finally / dynamic-wind, or the like, are
> sufficient, and core Common Lisp has both. You don't need to implement
> an interpreter for a different language that has continuations for
> implementing exception handling in core Common Lisp, and this is what
> the implementation behind the link above shows. So this has nothing to
> do with Turing equivalence - we have a true embedding of exception
> handling here.

Yes. This wasn't as bad as the other thread but I think the whole thing is
spiralling into a simple Turing argument. There is no easy way to add
callcc to Lisp (or OCaml).

> So how are the benchmarks for my last version of the minim interpreter
> coming along?

Your compiler-in-Lisp was the fastest of all implementations that I
benchmarked. PILS and Qi are the slowest. However, there was a Minim->C
compiler written in Haskell that I've yet to test but that, of course, will
be almost unbeatable (and compile time for such a small target program will
be insignificant).

When I can find the time I'll write a Minim->C compiler in OCaml and a
staged Minim interpreter in MetaOCaml.

How's the lexer/parser in Lisp going? :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Pascal Costanza
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <5gs3ntF3i98lvU1@mid.individual.net>
Jon Harrop wrote:
> When I can find the time I'll write a Minim->C compiler in OCaml and a
> staged Minim interpreter in MetaOCaml.

I'd be interested to see the code for the latter.

> How's the lexer/parser in Lisp going? :-)

Syntax is for sissies. ;)


Pascal

-- 
My website: http://p-cos.net
Common Lisp Document Repository: http://cdr.eurolisp.org
Closer to MOP & ContextL: http://common-lisp.net/project/closer/
From: Kent M Pitman
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <u8x92ixii.fsf@nhplace.com>
Pascal Costanza <··@p-cos.net> writes:

> Syntax is for sissies. ;)

But without syntax what would there be to do timings on?

Some languages just gotta make work for themselves so their practitioners
will have something to bill for in terms of implementation, timing, tuning,
training, debugging, interfacing, etc. ... it's a whole industry.
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a92ce9$0$1624$ed2619ec@ptn-nntp-reader02.plus.net>
Kent M Pitman wrote:
> Some languages just gotta make work for themselves so their practitioners
> will have something to bill for in terms of implementation, timing,
> tuning, training, debugging, interfacing, etc. ... it's a whole industry.

Indeed. Writing a Mathematica to C# compiler would pay the bills if nothing
else...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <3babti9q9z.fsf@hod.lan.m-e-leypold.de>
>> Here is a more precise answer: For a good exception handling system, you
>> don't need full continuations. One-shot escaping continuations +
>> unwind-protect / try-finally / dynamic-wind, or the like, are
>> sufficient, and core Common Lisp has both. You don't need to implement
>> an interpreter for a different language that has continuations for
>> implementing exception handling in core Common Lisp, and this is what
>> the implementation behind the link above shows. So this has nothing to
>> do with Turing equivalence - we have a true embedding of exception
>> handling here.
>
> Yes. This wasn't as bad as the other thread but I think the whole thing is
> spiralling into a simple Turing argument. There is no easy way to add
> callcc to Lisp (or OCaml).


Jon -- what do you mean by "not easy" here: Not easy in the current
implementations or is callcc somehow in conflict with the language
structure itself? What I'm aiming at is the question whether a
different runtime (with a different stack implementation like most
scheme implementations I know of) would allow integrating callcc to
OCaml and/or Lisp. On the other hand, if that was not possible
(because callcc is somehow in conflict with the rest of the language
definition) that would mean that there would be no efficient or
straightforward compiler from Lisp and/or OCaml to Scheme, something
I always considered doable, though perhaps not desirable.  


Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a9c068$0$1624$ed2619ec@ptn-nntp-reader02.plus.net>
Markus E.L. wrote:
> Jon -- what do you mean by "not easy" here: Not easy in the current
> implementations or is callc somehow in conflict with the language
> structure itself? What I'm aiming at is the question wether a
> different runtime (with a different stack implementation like most
> scheme implementations I know of) would allow integrating callc to
> OCaml and/or Lisp.

That is possible, as you say, but getting decent performance out of an
implementation that uses a custom stack is likely to be difficult. If you
stick with an array-based representation then ordinary performance should
be good (you would need to allocate a huge array though) but callcc would
have to copy the whole array. If you switch to an immutable list-based
representation then ordinary code will be much slower (the GC will be
stressed a lot more, collecting stack frames) but there is no need to copy
the stack.

> On the other side, if that was not possible 
> (because callcc is somehow in conflict with the rest of the language
> definition) that would mean that there would be no efficient or
> straight forward compiler from Lisp and/or OCaml to Scheme, something
> I always considered doable, though perhaps not desirable.

There is no fundamental theoretical problem besides changing the stack to
make it persistent.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <2jlkd26q4r.fsf@hod.lan.m-e-leypold.de>
> Markus E.L. wrote:
>> Jon -- what do you mean by "not easy" here: Not easy in the current
>> implementations or is callc somehow in conflict with the language
>> structure itself? What I'm aiming at is the question wether a
>> different runtime (with a different stack implementation like most
>> scheme implementations I know of) would allow integrating callc to
>> OCaml and/or Lisp.
>
> That is possible, as you say, but getting decent performance out of an
> implementation that uses a custom stack is likely to be difficult. 

May I contradict here? I thought most Schemes have decent
performance, even with callcc?

> There is no fundamental theoretical problem besides changing the stack to
> make it persistent.

Good :-).

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46ab7481$0$1600$ed2619ec@ptn-nntp-reader02.plus.net>
Markus E.L. wrote:
> May I contradict here? I thought, most of the Schemes have a decent
> performance, even with callcc?

SML/NJ is another one to look at: it transforms into CPS before compilation
so (AFAIK) there is no non-persistent stack. I believe all such languages
are significantly slower than those with conventional stacks.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xps2adnwi.fsf@ruckus.brouhaha.com>
Jon Harrop <···@ffconsultancy.com> writes:
> SML/NJ is another one to look at: it transforms into CPS before compilation
> so (AFAIK) there is no non-persistent stack. I believe all such languages
> are significantly slower than those with conventional stacks.

ORBIT (the compiler for T) was certainly one of the fastest Scheme
systems out there, back in the day, and it converted to CPS immediately.
It's possible that the CPS got compiled into stack code when appropriate.
From: Markus E.L.
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <v4bqdu2sdh.fsf@hod.lan.m-e-leypold.de>
> Markus E.L. wrote:
>> May I contradict here? I thought, most of the Schemes have a decent
>> performance, even with callcc?
>
> SML/NJ is another one to look at: it transforms into CPS before compilation
> so (AFAIK) there is no non-persistent stack. I believe all such languages
> are significantly slower than those with conventional stacks.


Chicken-Scheme AFAIK has a different approach: They use stack-frames
but also can compact the stack in some way. I'm not sure how it works
but remember that they claim they have all advantages of a linear
stack and can still support call/cc efficiently.

And, AFAIK, the transformation to CPS is not a requirement for
supporting continuations with non-linear stacks: A more direct
representation of stack frames as linked blocks in the heap should be
possible and perhaps faster than building closures (but I'm talking
about things I hardly know here).

Regards -- Markus
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f85pvd$h3m$1@registered.motzarella.org>
Paul Rubin schrieb:
> Andr� Thieme <······························@justmail.de> writes:
>> In Lisp you could do the same. You could teach Lisp implicit currying
>> and also implicit lazyness (altough I think that it would feel really bad).
> 
> You can't do anything of the sort.

This is not correct.


> Consider that Haskell has no
> language support for exceptions or continuations, because they're
> implemented straightforwardly as library modules,

The same is true for Lisp. It comes with an exception system which is
programmed in Lisp. If you want you can make your own during the weekend
and others can import it as a lib. For an exception system it mostly
makes sense if everyone is using the same. I guess that Haskell could in
principle have 30 of them but probably only one is widely used.


> using a combination of lazy evaluation and currying.

In Lisp it is done with a mix of functional programming, currying,
imperative programming and some few macros.


> They can't be implemented that way in Lisp

This is not correct.
You could implement implicit currying or laziness in Lisp and put it
online as a lib that can be loaded into a running program.
Implicit laziness would be a tough task, nothing for 30 minutes.

Maybe in Haskell you can do the same: write a lib that when loaded will
make Haskell implicitly eager. I don't know if that is possible and, if
so, how easy it would be.


André
-- 
From: Paul Rubin
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7xbqe0sd6s.fsf@ruckus.brouhaha.com>
André Thieme <······························@justmail.de> writes:
> The same is true for Lisp. It comes with an exception system which is
> programmed in Lisp. If you want you can make your own during the weekend
> and others can import it as a lib. 

But I don't think this is possible, unless I'm missing something.  I
mean, you could write the Lisp condition system as a macro library,
but down at the bottom the library would have to use a
language-supported primitive like catch/throw or something, to
interrupt the flow of control.  What I'm getting at is that Haskell
doesn't need such a primitive because the ability to steer the flow of
control arises automatically from lazy evaluation.

Thanks to Neel re clarification about continuations.
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural   language Minim
Date: 
Message-ID: <f87c2u$7da$1@online.de>
André Thieme schrieb:
> 
> The same is true for Lisp. It comes with an exception system which is
> programmed in Lisp. If you want you can make your own during the weekend
> and others can import it as a lib.

Hm... I think that weekend would be just the programming.
Designing an exception system well requires a great deal of experience.

 > For an exception system it mostly
> makes sense if everyone is using the same. I guess that Haskell could in
> principle have 30 of them but probably only one is widely used.

Confirm.

> This is not correct.
> You could implement implicit currying or lazyness in Lisp and put it
> online as a lib that can be loaded into a running program.
> Implicit lazyness would be a tough task, nothing for 30 minutes.

In any language where that is possible at all.
Implicit laziness requires a great deal of support from the run-time 
system to get even near acceptable performance. I don't think that Lisp 
could do that; you'd essentially end up reimplementing Haskell in Lisp 
(or a lazy Lisp in Lisp).

> Maybe in Haskell you can do the same: write a lib that when loaded will
> make Haskell implicitily eager. Don't know if that is possible and if
> yes how easy it would be.

No, that's not possible.

You can't look "under the hood of the implementation" in Haskell as you 
would in Lisp.

Novices often try to achieve efficiency by forcing expressions into 
eager evaluation. Most fail, because the eagerness operators in Haskell 
are nontrivial to use; even those who succeed have rather ugly code and 
drop the practice quickly.
OTOH there are other optimization techniques that work well. 
(Memoization is a popular alternative and does not require you to hash 
your code up.)
Leaving optimization and debugging purposes aside, I haven't seen 
anybody wanting eager evaluation in Haskell in years, not even in 
specific situations and most certainly not as a by-default strategy. (I 
believe the latter is more because those who find lazy evaluation 
utterly unpalatable don't use Haskell for long.)

Regards,
Jo
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a79672$0$1598$ed2619ec@ptn-nntp-reader02.plus.net>
Joachim Durchholz wrote:
> In any language where that is possible at all.
> Implicit laziness requires a great deal of support from the run-time
> system to get even near acceptable performance. I don't think that Lisp
> could do that; you'd essentially end up reimplementing Haskell in Lisp
> (or a lazy Lisp in Lisp).

Exactly.

> Leaving optimization and debugging purposes aside, I haven't seen
> anybody wanting eager evaluation in Haskell in years, not even in
> specific situations and most certainly not as a by-default strategy.

I lurk on the Haskell Cafe mailing list and eager evaluation is often the
solution to reliability or performance problems: laziness is unpredictable.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Chris Smith
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <MPG.2112598af29cadd59898cd@news.altopia.net>
Jon Harrop <···@ffconsultancy.com> wrote:
> I lurk on the Haskell Cafe mailing list

Hmm.  Most people's definition of "lurk" doesn't include posting. :)

> and eager evaluation is often the
> solution to reliability or performance problems: laziness is unpredictable.

True: eager evaluation is often the solution to performance problems.  
Equally true: lazy evaluation is often the solution to performance 
problems.  It would be nice if one or the other were always the right 
answer, but alas, that's not the case.

Interestingly, one thing that IS true is that eager-evaluation pure 
functional programs are actually asymptotically worse than alternatives 
for some problems.  That's part of why they don't exist.  Some languages 
(e.g., Haskell) choose to offer lazy evaluation to recover that lost 
performance.  Others (e.g., ML, Lisp) choose to offer impure language 
features to recover that lost performance.

-- 
Chris Smith
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f8cl0i$1qj$1@online.de>
Chris Smith schrieb:
> Interestingly, one thing that IS true is that eager-evaluation pure 
> functional programs are actually asymptotically worse than alternatives 
> for some problems.

Any background information where I can read up on that?

Regards,
Jo
From: Frank Buss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <n2vauuh4pbtk.1kj81loijm5vk.dlg@40tude.net>
Jon Harrop wrote:

> I lurk on the Haskell Cafe mailing list and eager evaluation is often the
> solution to reliability or performance problems: laziness is unpredictable.

Do you have an example?

-- 
Frank Buss, ··@frank-buss.de
http://www.frank-buss.de, http://www.it4-systems.de
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a8694a$0$1626$ed2619ec@ptn-nntp-reader02.plus.net>
Frank Buss wrote:
> Jon Harrop wrote:
>> I lurk on the Haskell Cafe mailing list and eager evaluation is often the
>> solution to reliability or performance problems: laziness is
>> unpredictable.
> 
> Do you have an example?

Sure:

"Need for speed: the Burrows-Wheeler Transform" -
Andrew Coppin wrote a beautiful implementation of the Burrows-Wheeler
compression algorithm in Haskell. Using it to transform 4kB of data
required 60Mb of memory and scaled uncontrollably. After a month of
optimizing by experts, it remains orders of magnitude slower than
production implementations.

"Haskell version of ray tracer code is much slower than the original ML" -
Philip Armstrong had a go at translating my ray tracer into Haskell. The
first version is many times slower. After a week of intensive optimization
by several experts, it avoids laziness (acting as a poor ML) and remains
50% slower.

I have been evaluating Haskell for two months now, with a view to writing
a "Haskell for Scientists".

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Frank Buss
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <13jt3e6fe8nfe.1u5evriibmyu3$.dlg@40tude.net>
Jon Harrop wrote:

> "Need for speed: the Burrows-Wheeler Transform" -
> Andrew Coppin wrote a beautiful implementation of the Burrows-Wheeler
> compression algorithm in Haskell. Using it to transform 4kB of data
> required 60Mb of memory and scaled uncontrollably. After a month of
> optimizing by experts, it remains orders of magnitude slower than
> production implementations.

Looks like you mean this article:

········································@haskell.org/msg25609.html

I can't see that the first version needed a month of optimizing; it looks
like proof of the opposite. Quoting from the posting:

"Woah... What the hell? I just switched to Data.ByteString.Lazy and WHAM!
Vast speed increases..."

I'm not a Haskell expert, but "Data.ByteString.Lazy" sounds very much like
lazy evaluation. And this last version in the first posting was already
faster by a factor of 4 than the C++ version, but it uses more memory.

> "Haskell version of ray tracer code is much slower than the original ML" -
> Philip Armstrong had a go at translating my ray tracer into Haskell. The
> first version is many times slower. After a week of intensive optimization
> by several experts, it avoids laziness (acting as a poor ML) and remains
> 50% slower.

Looks like you mean this article:

http://groups.google.com/group/fa.haskell/browse_thread/thread/797b6ff42a1171d2

I can't see "a week of intensive optimization", at least not for the lazy
part, which was suggested at the same day the first posting was made. The
benchmark was posted one day after the first posting, which says 38.2
seconds for the Haskell version and 23.8 seconds for the OCaml version.

But in general you may be right that for this example lazy evaluation was
slower than strict evaluation; I didn't study the source code or run
benchmarks on both versions.

-- 
Frank Buss, ··@frank-buss.de
http://www.frank-buss.de, http://www.it4-systems.de
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a8cdda$0$1600$ed2619ec@ptn-nntp-reader02.plus.net>
Frank Buss wrote:
> "Woah... What the hell? I just switched to Data.ByteString.Lazy and WHAM!
> Vast speed increases..."

That is a Haskell vs Haskell comparison and says nothing in comparison with
other languages.

> I'm not a Haskell expert, but "Data.ByteString.Lazy" sounds very much like
> lazy evaluation. And this last version in the first posting was already
> faster by a factor of 4 than the C++ version, but it uses more memory.

As you might imagine, that is comparing Haskell with some pretty awful C++
code!

If you compare with good C++, you get a very different result as Stefan
O'Rear noted in that very same thread:

"Mr. C++ apparently isn't a very good C++ programmer, since his best
effort absolutely *pales* in comparison to Julian Seward's BWT: ... This C++
is 200 times faster."

http://www.mail-archive.com/haskell-cafe%40haskell.org/msg25645.html

> I can't see "a week of intensive optimization", at least not for the lazy
> part, which was suggested at the same day the first posting was made.

Philip Armstrong's first post with an already-optimized ray tracer was 21st
June:

  http://www.mail-archive.com/haskell-cafe%40haskell.org/msg25457.html

Spencer Janssen's major optimization was 25th June. Final version is dated
5th July:

  http://www.kantaka.co.uk/darcs/ray/

> The 
> benchmark was posted one day after the first posting, which says 38.2
> seconds for the Haskell version and 23.8 seconds for the OCaml version.
> 
> But in general you may be right for this example that lazy evaluation was
> slower than strict evaluation, I didn't studied the source code and run
> benchmarks on both versions.

You can convert from eager back to lazy easily in Haskell: just remove
the !s.

On my 2.2Ghz Athlon 64 dual core x64 Debian I get running times:

Lazy Haskell:   41.5s  75LOC  GHC 6.6.1
Eager Haskell:  18.7s  75LOC  GHC 6.6.1
OCaml:           7.2s  56LOC  OCaml 3.10.0

So idiomatic Haskell is ~6x slower than idiomatic OCaml on my machine.

Compile times:

GHC:   2.776s
OCaml: 0.375s

That was comparing the OCaml:

  http://www.ffconsultancy.com/languages/ray_tracer/comparison.html

to the Haskell (with and without the !s) by Philip Armstrong, Bulat
Ziganshin, Mark T.B. Carroll, Simon Marlow, Donald Bruce Stewart, Claus
Reinke and Spencer Janssen:

  http://www.kantaka.co.uk/darcs/ray/

I also found it interesting that this trivial benchmark showed up several
bugs in GHC.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Ulf Wiger
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <xcz4pjus4yg.fsf@cbe.ericsson.se>
>>>>> Andre Thieme writes:

  Andre> In other situations Lisp can be the better option.  Joel
  Andre> Raymond wrote about Erlangs advantages over Haskell:
  Andre> http://wagerlabs.com/2006/1/1/haskell-vs-erlang-reloaded/ For
  Andre> example point 3.2 was talking about static typing and the
  Andre> extra code it needed.

A paper at the upcoming ICFP conference will present a more 
systematic comparison between C++, Erlang and Haskell.
It shows some interesting stuff, but also warns against too
far-reaching conclusions.

Evaluating High-Level Distributed Language Constructs
    by Jan Nystrom, Phil Trinder, David King

BR,
Ulf W
-- 
Ulf Wiger, Senior Specialist,
   / / /   Architecture & Design of Carrier-Class Software
  / / /    Team Leader, Software Characteristics
 / / /     Ericsson AB, IMS Gateways
From: Don Dwoske
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185285615.728712.153610@d55g2000hsg.googlegroups.com>
On Jul 24, 2:15 am, Ulf Wiger <·······@cbe.ericsson.se> wrote:
> A paper at the upcoming ICFP conference will present a more
> systematic comparison between C++, Erlang and Haskell.
> It shows some interesting stuff, but also warns against too
> far-reaching conclusions.
>
> Evaluating High-Level Distributed Language Constructs
>     by Jan Nystrom, Phil Trinder, David King


Brief discussion and a link to that paper can be found here:
http://lambda-the-ultimate.org/node/2287

Just the paper:
http://www.macs.hw.ac.uk/~trinder/papers/ICFP2007.pdf
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a5af50$0$1630$ed2619ec@ptn-nntp-reader02.plus.net>
André Thieme wrote:
> In other situations Lisp can be the better option.
> Joel Raymond wrote about Erlangs advantages over Haskell:
> http://wagerlabs.com/2006/1/1/haskell-vs-erlang-reloaded/
> For example point 3.2 was talking about static typing and the extra
> code it needed.

Look at the vector record and zero vector definition from my ray tracer. In
Lisp:

(defstruct (vec (:conc-name nil)
                (:constructor vec (x y z))
                (:type (vector double-float)))
  x y z)

(defvar zero (vec 0d0 0d0 0d0))

In OCaml:

type vec = {x:float; y:float; z:float}
let zero = {x=0.; y=0.; z=0.}

>> (No, Haskell isn't cheating, it simply doesn't have or need macros and
>> quoting, so it can encode the same code with far less symbols.)
> 
> It is true that the need for Macros in Haskell is reduced. This comes
> from implicit currying but mainly from laziness.
> In Lisp you could do the same. You could teach Lisp implicit currying
> and also implicit laziness (although I think that it would feel really
> bad).

Greenspun.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f84som$27a$1@aioe.org>
Jon Harrop escreveu:
> André Thieme wrote:
>> In other situations Lisp can be the better option.
>> Joel Raymond wrote about Erlangs advantages over Haskell:
>> http://wagerlabs.com/2006/1/1/haskell-vs-erlang-reloaded/
>> For example point 3.2 was talking about static typing and the extra
>> code it needed.
> 
> Look at the vector record and zero vector definition from my ray tracer. In
> Lisp:
> 
> (defstruct (vec (:conc-name nil)
>                 (:constructor vec (x y z))
>                 (:type (vector double-float)))
>   x y z)
> 
> (defvar zero (vec 0d0 0d0 0d0))
> 
> In OCaml:
> 
> type vec = {x:float; y:float; z:float}
> let zero = {x=0.; y=0.; z=0.}
> 
For a LOC counting perspective, both snippets above have the same count: 
2 logical lines.
From: Alexander Schmolck
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <yfswswpu9zl.fsf@gmail.com>
Cesar Rabak <·······@yahoo.com.br> writes:

>> (defstruct (vec (:conc-name nil)
>>                 (:constructor vec (x y z))
>>                 (:type (vector double-float)))
>>   x y z)
>>
>> (defvar zero (vec 0d0 0d0 0d0))
>>
>> In OCaml:
>>
>> type vec = {x:float; y:float; z:float}
>> let zero = {x=0.; y=0.; z=0.}
>>
> For a LOC counting perpective, both snippets above have the same count: 2
> logical lines.

You should write a paper introducing this brilliant new code metric of
"logical lines" (may I suggest "PROGN the ultimate 1-liner" for a title?).

'as
From: Cesar Rabak
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f85e9o$oos$1@aioe.org>
Alexander Schmolck escreveu:
> Cesar Rabak <·······@yahoo.com.br> writes:
> 
>>> (defstruct (vec (:conc-name nil)
>>>                 (:constructor vec (x y z))
>>>                 (:type (vector double-float)))
>>>   x y z)
>>>
>>> (defvar zero (vec 0d0 0d0 0d0))
>>>
>>> In OCaml:
>>>
>>> type vec = {x:float; y:float; z:float}
>>> let zero = {x=0.; y=0.; z=0.}
>>>
>> For a LOC counting perpective, both snippets above have the same count: 2
>> logical lines.
> 
> You should write a paper introducing this brilliant new code metric of
> "logical lines" (my I suggest "PROGN the ultimate 1-liner" for a title?).
> 
I regret to inform you that this has already been done; it is a practice 
used by companies for 'benchmarking' productivity and for making 
approximations against the useful program result in Function Points. The 
technique is called "backfiring" and is almost twenty years old.

The idea behind it is to avoid the noise coming from different 
indentation styles, continuation of a (logical) line in more than a 
physical line and the converse (having more than a logical line in the 
same physical one).
From: André Thieme
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f85nv7$aas$1@registered.motzarella.org>
Jon Harrop schrieb:
> André Thieme wrote:
>> In other situations Lisp can be the better option.
>> Joel Raymond wrote about Erlangs advantages over Haskell:
>> http://wagerlabs.com/2006/1/1/haskell-vs-erlang-reloaded/
>> For example point 3.2 was talking about static typing and the extra
>> code it needed.
> 
> Look at the vector record and zero vector definition from my ray tracer. In
> Lisp:
> 
> (defstruct (vec (:conc-name nil)
>                 (:constructor vec (x y z))
>                 (:type (vector double-float)))
>   x y z)
> 
> (defvar zero (vec 0d0 0d0 0d0))
> 
> In OCaml:
> 
> type vec = {x:float; y:float; z:float}
> let zero = {x=0.; y=0.; z=0.}


Most of what we find in here are hints for the compiler.
Also the Lisp code here is doing more than your OCaml.

(defstruct vec x y z) would have been enough if you don't want to
tell the compiler more and ask Lisp to build a constructor for you.


André
-- 
From: jayessay
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m3myxlss66.fsf@sirius.goldenthreadtech.com>
Joachim Durchholz <··@durchholz.org> writes:

> takes less of a line; the syntactic Haskell equivalent of the above
> example would look like this:
>    make-accumulator N = incf N

This is incorrect for several reasons, the most obvious being the lack
of accounting for the variable arity of incf...


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Joachim Durchholz
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <f85atv$19i$1@online.de>
jayessay schrieb:
> Joachim Durchholz <··@durchholz.org> writes:
> 
>> takes less of a line; the syntactic Haskell equivalent of the above
>> example would look like this:
>>    make-accumulator N = incf N
> 
> This is incorrect for several reasons, the most obvious being the lack
> of accounting for the variable arity of incf...

Haskell is a currying language, so at least this counterargument doesn't 
hold.

With some handwaving, the above could be written as

   make-accumulator N ... = incf N ...

with the understanding that the three dots stand for an arbitrary number 
of parameters to be copied through verbatim from left to right.

Actually, in Haskell,

   make-accumulator N = incf N

is equivalent to

   make-accumulator = incf

with the sole exception that the first declaration forces 
make-accumulator to have at least one parameter.

Hope this clears it up a little.

Regards,
Jo
From: jayessay
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <m3ejixsp8y.fsf@sirius.goldenthreadtech.com>
Joachim Durchholz <··@durchholz.org> writes:

> jayessay schrieb:
> > Joachim Durchholz <··@durchholz.org> writes:
> >
> >> takes less of a line; the syntactic Haskell equivalent of the above
> >> example would look like this:
> >>    make-accumulator N = incf N
> > This is incorrect for several reasons, the most obvious being the
> > lack
> > of accounting for the variable arity of incf...
> 
> Haskell is a currying language, so at least this counterargument
> doesn't hold.
> 
> With some handwaving, the above could could be written as
> 
>    make-accumulator N ... = incf N ...
> 
> with the understanding that the three dots stand for an arbitrary
> number of parameters to be copied through verbatim from left to right.

Isn't the "handwaving" aspect the point?, i.e., I don't think anyone
is saying you can't do this, but simply that what you wrote was _not_
equivalent.


>    make-accumulator N = incf N
> 
> is equivalent to
> 
>    make-accumulator = incf

The N is not the issue - it was the missing i.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Markus E Leypold
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <xvk5spn25b.fsf@hod.lan.m-e-leypold.de>
> Isn't the "handwaving" aspect the point?, i.e., I don't think anyone
> is saying you can't do this, but simply that what you wrote was _not_
> equivalent.

Equivalent in which sense? 

Regards -- Markus
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a709f2$0$1598$ed2619ec@ptn-nntp-reader02.plus.net>
jayessay wrote:
> The N is not the issue - it was the missing i.

The "missing i" makes no difference so you might as well not write it.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a5c76c$0$1600$ed2619ec@ptn-nntp-reader02.plus.net>
Stefan Ram wrote:
>   So, this is supposed to be a benchmark for the execution time?

Yes. I get a lot of errors when I try to compile it:

$ javac stefanram.java
stefanram.java:10: unclosed character literal
    { final java.lang.Object 'source entry' = state.'current entry'();
                             ^
stefanram.java:10: unclosed character literal
    { final java.lang.Object 'source entry' = state.'current entry'();

Any ideas what these mean?

>   I believe that I now could write a faster interpreter by
>   first translating the S-expression to an object model.

Java has lex and yacc tools (JavaCC combines these, IIRC) so you could use
them for parsing.

Define an abstract syntax tree (AST) as a class hierarchy with an abstract
base class representing the AST type. This will be more verbose than the
Lisp or OCaml because you have to write the classes down by hand in Java.
Get the parser to generate an AST.

When it comes to evaluation, you can use the built-in virtual function
dispatch (e.g. a "run" member function) to avoid the nested ifs and string
compares that you're currently doing.

Finally, you can avoid name lookups on variables and tags in gotos by
replacing these with ints.

I might have a go at doing this in Java...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Steven E. Harris
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <7yhcntlllc.fsf@panix.com>
···@zedat.fu-berlin.de (Stefan Ram) writes:

> Here is a Java implementation.

You have a truly unique style of writing and formatting your Java
code. It took me a few moments to get accustomed to it, but it is
readable. Aside from similarities to idiomatic Lisp, it reminds me of
Dinkumware's style used in their C++ libraries. Take that as a
compliment.

-- 
Steven E. Harris
From: Frederic
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <1185350715.371483.106710@i13g2000prf.googlegroups.com>
On Jul 18, 9:24 am, Mark Tarver <··········@ukonline.co.uk> wrote:
> \Jon suggested that it would be good to implement some significant
> programs in different functional languages for comparison.  He
> suggested interpreters for procedural languages like Basic.

Here is a partial solution in Haskell.  My version of Minim
is far more expressive than the original one (this comes for
free with my approach), and there is no need for an interpreter:
at run-time, the expression is "compiled" into a lambda-expression
that executes the code.  Hence, it integrates seamlessly within
the rest of Haskell.  The limitation is that everything is coded
in CPS (Continuation Passing Style) for easy use of Goto, hence
the result is somewhat slow (on my machine, 0.04 secs were necessary).
A better approach would be to replace CPS by exceptions, since a
Goto does not need to rewind ; performance should then become much
better, at the cost of purity and elegance.

data Minim r = Action (IO ())
             | Goto Tag
             | Label Tag
             | If (IO Bool) [Minim r] [Minim r]

That is, a program is either an action (embedded as a Haskell
computation in the IO monad), a Goto, a Label, or a test
(which condition is also embedded as a computation in the IO
monad).  For example,
Action (putStrLn "Add x and y") is the action that writes the
original string.  Input is done through
Action (do val <- getLine
           writeIORef x (read val))
(which sets x to an integer read on stdin).

Goto and Label are simple, and test is straightforward, too,
taking the syntax:
If (testNull varX) [Goto labEnd] [Goto labSub]
etc.

The example program can be embedded as:

bench = [ Action $ do putStrLn "Add x and y"
                      putStr "Input x: "
                      hFlush stdout
                      vx <- getLine
                      writeIORef varX (read vx)
                      putStr "Input y: "
                      hFlush stdout
                      vy <- getLine
                      writeIORef varY (read vy)
        , Label labelMain
        , If (testNull varX) [Goto labelEnd] [Goto labelSub]
        , Label labelSub
        , Action $ do modifyIORef varX (+ (-1))
        , Action $ do modifyIORef varY (+    1)
        , Goto labelMain
        , Label labelEnd
        , Action $ do putStr "The total of x and y is "
                      vy <- readIORef varY
                      print vy
                      putStrLn ""
        ]

I almost didn't cheat: transformation from an AST such as
the one used by Jon is straightforward, but I was interested
in the translation to a function part, so I will write this
part if I have time, later.

The interesting part is, of course, how this is translated
into a real Haskell function.  First, we need to define a
few types:

type Program r a = ContT r IO a
(forget the IO thing ; the meaning of that is that a program
of type a is a continuation of type a).

A tag is a reference to a program returning nothing:
type Tag = IORef (Program r ())
In other words, a Tag is nothing more than the address of the
fragment of code that corresponds to the execution of what
follows the tag in the program.

Finally, the compilation function is:
compile_: [Minim r] -> IO (Program r ())
and a cycle compile/execute is:

main = do a <- compile_ bench
          runContT a return
Please note that runContT is nothing special ; a is a "real"
Haskell lambda-expression.  It is necessary only to avoid syntax
problems.

compile_ is almost straightforward. Readers not used
to Haskell can just replace liftIO by the identity function.
I show code equivalent to the original code, but written
in a more readable fashion for non-users of Haskell.

compile_ [] = return $ liftIO (return ())
-- The empty program does nothing

compile_ (Action a : r) = do seq <- compile_ r
                             return $ do liftIO a
                                         seq

-- Interesting part:
compile_ (Label s : r) =
  do rest <- compile_ r
     liftIO (writeIORef s rest)
     return rest

compile_ (Goto p : r) =
  do compile_ r
     return $ do v <- act
                 v
  where act = liftIO $ readIORef p

------- The real compilation function ---------
compile_ [] = return (liftIO $ return ())

compile_ (Action a : r) = compile_ r >>= (\seq -> return $ liftIO a >>
seq)

compile_ (Goto p : r) = compile_ r >> (return $ (liftIO $ readIORef p)
>>= id)

compile_ (Label s : r) = compile_ r >>=
                         (\rest -> liftIO (writeIORef s rest) >>
return rest)

compile_ (If s l1 l2 : r) = compile_ r >>
    do l1' <- compile_ l1
       l2' <- compile_ l2
       return $ do v <- liftIO s
                   case v of
                     True  -> l1'
                     False -> l2'
--------------------


This approach shows:
 1- that a full-blown "pseudo-compiler" can be written
    in 13 lines of code
 2- that CPS monads in Haskell makes translation of
    spaghetti code (with gotos) almost trivial.
 3- that the interpreter itself (in Jon's solution, there
    is some structure to keep a program counter, and
    execution is performed line by line) is not necessary.
 4- that such an approach gives reasonable speed.

Of course, this is fully translatable to OCaml code.  The
only point is that since I use the ContT monad, the OCaml
code should handle continuations explicitly, which is
error-prone (but nothing the type checker cannot handle).

--
 Frederic

-- Full code begins here
-- File BMinim.hs
-- Compilation: ghc -O3 --make BMinim.hs
-- Execution: ./BMinim
-- Benchmark: exec contains twice the line "100000"
--   frederic $ time ./BMinim < exec
--   real    0m0.044s
--   user    0m0.031s
--   sys     0m0.011s

module Main
    where

import qualified Data.Map as M
import Data.Maybe
import Data.IORef
import Control.Monad.Cont

-- Only necessary for the example
import System.IO
import System.IO.Unsafe

type Program r a = ContT r IO a

type ECont r = Program r ()

type Tag r = IORef (ECont r)

data Minim r = Action (IO ())
             | Goto (Tag r)
             | Label (Tag r)
             | If (IO Bool) [Minim r] [Minim r]

compile_ :: [Minim r] -> IO (Program r ())
compile_ [] = return (liftIO $ return ())

compile_ (Action a : r) = compile_ r >>= (\seq -> return $ liftIO a >>
seq)

compile_ (Goto p : r) = compile_ r >> (return $ (liftIO $ readIORef p)
>>= id)

compile_ (Label s : r) = compile_ r >>=
                         (\rest -> liftIO (writeIORef s rest) >>
return rest)

compile_ (If s l1 l2 : r) = compile_ r >>
    do l1' <- compile_ l1
       l2' <- compile_ l2
       return $ do v <- liftIO s
                   case v of
                     True  -> l1'
                     False -> l2'


--------- Code for the example begins here -------------


labelMain :: IORef (ECont r)
labelMain = unsafePerformIO $ newIORef undefined

labelSub :: IORef (ECont r)
labelSub = unsafePerformIO $ newIORef undefined

labelEnd :: IORef (ECont r)
labelEnd = unsafePerformIO $ newIORef undefined

varX :: IORef Int
varX = unsafePerformIO $ newIORef undefined

varY :: IORef Int
varY = unsafePerformIO $ newIORef undefined

testNull :: IORef Int -> IO Bool
testNull vx = do vx' <- readIORef vx
                 return (vx' == 0)

bench = [ Action $ do putStrLn "Add x and y"
                      putStr "Input x: "
                      hFlush stdout
                      vx <- getLine
                      writeIORef varX (read vx)
                      putStr "Input y: "
                      hFlush stdout
                      vy <- getLine
                      writeIORef varY (read vy)
        , Label labelMain
        , If (testNull varX) [Goto labelEnd] [Goto labelSub]
        , Label labelSub
        , Action $ do modifyIORef varX (+ (-1))
        , Action $ do modifyIORef varY (+    1)
        , Goto labelMain
        , Label labelEnd
        , Action $ do putStr "The total of x and y is "
                      vy <- readIORef varY
                      print vy
                      putStrLn ""
        ]

main :: IO ()
main = do a <- compile_ bench
          runContT a return
From: Jon Harrop
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <46a72515$0$1589$ed2619ec@ptn-nntp-reader02.plus.net>
This is very interesting, thank you.

Frederic wrote:
> ...
> Hence, it integrates seamlessly within 
> the rest of Haskell.  The limitation is that everything is coded
> in CPS (Continuation Passing Style) for easy use of Goto, hence
> the result is somewhat slow (on my machine, 0.04 secs were necessary).
> A better approach would be to replace CPS by exceptions, since a
> Goto does not need to rewind ; performance should then become much
> better, at the cost of purity and elegance.

I don't follow this. How can you replace CPS with exceptions?

> I almost didn't cheat: transformation from an AST such as
> the one used by Jon is straightforward, but I was interested
> in the translation to a function part, so I will write this
> part if I have time, later.

I would very much like to see parsers written in Haskell and Lisp. Also,
note that I omitted the superfluous parentheses from the Minim language in
my parser: the program I'm evaluating contains no brackets.

> This approach shows:
>  1- that a full-blown "pseudo-compiler" can be written
>     in 13 lines of code

I am not sure what the meaning of "pseudo-compiler" is. I would call this an
interpreter that composes functional rather than non-functional values. In
both cases, you can obtain a closure that, when invoked, evaluates a
program. In both cases, performance will be comparable (i.e. neither can
approach the performance of a real compiler like Pascal's).

So I cannot see any difference between this and an ordinary interpreter,
except that an ordinary interpreter has an intermediate representation that
can be visualized whereas a monadic interpreter has a network of closures
that cannot be examined so easily.

>  2- that CPS monads in Haskell makes translation of
>     spaghetti code (with gotos) almost trivial.

Pascal used a similar trick in his Lisp. I never got around to implementing
this in OCaml or MetaOCaml.

However, saying that CPS makes something almost trivial is only useful as a
comparison. In this case, I would say that CPS is significantly more
complicated than iterating a program counter.

>  3- that the interpreter itself (in Jon's solution, there
>     is some structure to keep a program counter, and
>     execution is performed line by line) is not necessary.

Yes. I believe you have traded an explicit program counter for implicit
invocation of continuations. AFAICT, the main advantage of using CPS here
is that it allows you to stage evaluation as Pascal did. This was noted by
the MetaOCaml team in their published literature as well.

So I shall have to transform my interpreter into the CPS/Monadic form before
I can stage it in MetaOCaml. I think the results will be very
interesting... :-)

>  4- that such an approach gives reasonable speed.
> 
> Of course, this is fully translatable to OCaml code.  The
> only point is that since I use the ContT monad, the OCaml
> code should handle continuations explicitly, which is
> error-prone (but nothing the type checker cannot handle).

I suspect the ContT monad can even be expressed in OCaml's type system so
you could easily write in a monadic style in OCaml in this case. Monads
seem like overkill for a 50-line program though. :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
OCaml for Scientists
http://www.ffconsultancy.com/products/ocaml_for_scientists/?usenet
From: Frederic Beal
Subject: Re: shootout: implementing an interpreter for a simple procedural language Minim
Date: 
Message-ID: <slrnfaepjk.3fp.beal@clipper.ens.fr>
On 2007-07-25, Frederic <············@gmail.com> wrote:
> On Jul 18, 9:24 am, Mark Tarver <··········@ukonline.co.uk> wrote:
>> \Jon suggested that it would be good to implement some significant
>> programs in different functional languages for comparison.  He
>> suggested interpreters for procedural languages like Basic.
>
> Here is a partial solution in Haskell.  My version of Minim
> is far more expressive than the original one (this comes from
> free with my approach), and there is no need for an interpreter:
> at run-time, the expression is "compiled" into a lambda-expression
> that executes the code.  Hence, it integrates seamlessly within
> the rest of Haskell.  The limitation is that everything is coded
> in CPS (Continuation Passing Style) for easy use of Goto, hence
> the result is somewhat slow (on my machine, 0.04 secs were necessary).
> A better approach would be to replace CPS by exceptions, since a
> Goto does not need to rewind ; performance should then become much
> better, at the cost of purity and elegance.

Now for a very efficient but not so elegant solution : I am
asking template Haskell to 
 1) write a program in C corresponding to the minim program
 2) compile it into a shared library
 3) generate binding to load and execute it at execution time.

I do not post execution time, since it is essentially the same as
a compiled C program.  Here is an example of use:

------------------------------------------------------------------------------
-- File Main.hs
-- Compilation: ghc -fth -fglasgow-exts --make Main.hs
-- In the interpreter: ghci -fth -fglasgow-exts is required
module Main
    where

import CLink
import System.Posix.DynamicLinker 

main :: IO ()
main = $(minimC "foo"
         [ PutL "Add x and y\n"
         , PutL "x = ? "
         , Input "x"
         , PutL "y = ? "
         , Input "y"
         , Label "main"
         , If (Equal 0 "x") (Goto "end") (Goto "loop")
         , Label "loop"
         , Incr "x" (-1)
         , Incr "y" 1
         , Goto "main"
         , Label "end"
         , Output "y"
         , PutL "\n" ]) 
------------------------------------------------------------------------------

The CLink module requires gcc and libtool to be installed.  This runs
on my MacBookPro ; your mileage may vary, and you'll possibly have to
modify the calls to the gcc toolchain.  But as proof-of-concept, it works
just fine.

The process is extremely similar to what Kyoto Lisp (now GCL) does, and
performance is the same as C (for obvious reasons).  The front-end is 
almost seamless, though the implementation is ugly and unreliable (well,
pay me consultant fees and I will do better :) ).

-- 
 Frederic


module CLink
    where

import qualified Data.Set as S
import System.IO
import System.Posix.DynamicLinker
import System.Posix.Temp
import System.Process
import Language.Haskell.TH
import Foreign.C
import Foreign.Ptr

type IntF = CInt -> IO ()
foreign import ccall "dynamic" mkFun :: FunPtr IntF -> IntF

decl :: Dec
decl = ForeignD (ImportF CCall 
                         Unsafe 
                         "run" 
                         (mkName "foo")
                         (AppT (ConT (mkName "IO")) (TupleT 0)))


data Test = Equal Int String 
instance Show Test
    where
       show (Equal i s) = (show i) ++ "==" ++ s

varsT (Equal _ s) = S.singleton s

data Prog = Incr String Int
          | Label String
          | Goto String
          | If Test Prog Prog
          | PutL String
          | Input String
          | Output String
          | Group [String] [Prog]

(<+>) = S.union 

vars (Incr s _) = S.singleton s
vars (If t p1 p2) = varsT t <+> vars p1 <+> vars p2
vars (Input s) = S.singleton s
vars (Output s) = S.singleton s
vars (Group v p) = (S.fromList v) <+> (S.unions (map vars p))
vars _ = S.empty

instance Show Prog
    where
      show (Incr a n) = "  " ++ a ++ "+=" ++ (show n) ++ ";\n"
      show (Input s)  = "  scanf(\"%i\", &" ++ s ++ ");\n"
      show (PutL s)   = "  printf(\"%s\", " ++ (show s) ++ ");\n"
      show (Goto s)   = "  goto " ++ s ++ ";\n"
      show (Label s)  = s ++ ":\n"
      show (Output s) = "  printf(\"%i\", " ++ s ++ ");\n"
      show (Group [] p)  = "{" ++ (concat (map show p)) ++ "}"
      show (Group [v] p) = "{ int " ++ v ++ ";\n" ++ (concat (map show p)) ++ "}"
      show (Group (t : q) p) = "{ int " ++ t ++ (concat (map (", "++) q))
                               ++ ";\n" ++ (concat (map show p)) ++ "}"
      show (If t b1 b2) = "  if(" ++ (show t) ++ ")" ++ (show b1) ++ "  else" ++ (show b2)



compileAs :: String -> [Prog] -> String
compileAs name p = "#include <stdio.h>\n\nvoid " ++ 
                   name ++ "(int fooqux)\n" ++ (show gp)
  where
    gp = Group vs p
    vs = S.toList (S.unions $ map vars p)

    run n p = do let nn = "qubar_" ++ n
                 putStrLn $ nn ++ ".c"
                            (n, h) <- mkstemp $ nn ++ ".c"
                            hPutStrLn h (compileAs nn p)
                            hClose h
                            (runCommand $ "gcc -c " ++ nn ++ ".c") >>= waitForProcess
                            (runCommand $ "libtool -dynamic " ++ nn 
                               ++ ".o -lc -o lib" ++ nn ++ ".dylib") >>= waitForProcess
                            runCommand $ "rm " ++ nn ++ ".c"
                            return (nn, "lib" ++ nn ++ ".dylib")

type IOFI = IO (FunPtr IntF)

minimC :: String -> [Prog] -> Q Exp
nWDL  = VarE $ mkName "withDL"
nDLS  = VarE $ mkName "dlsym"
nIOFI = ConT $ mkName "IOFI"

minimC n p = do (obj, lib) <- runIO (run n p)
                return (AppE (AppE (AppE nWDL (LitE $ StringL lib))
                                                   (ListE []))
                                        (lam obj))

  where
    lam o   = LamE [VarP (mkName "dl")] (prog o)
    prog o  = DoE [ s1 o, s2 ]
    s1 o    = BindS (VarP (mkName "kas")) (call1 o)
    call1 o = AppE (AppE nDLS (VarE $ mkName "dl")) (LitE $ StringL o)
    s2      = NoBindS call2
    call2   = AppE (AppE (VarE $ mkName "mkFun") (VarE $ mkName "kas"))
                     (LitE $ IntegerL 42)