From: Jon Harrop
Subject: Very poor Lisp performance
Date: 
Message-ID: <42fd4459$0$97104$ed2619ec@ptn-nntp-reader03.plus.net>
Several people have kindly ported my ray tracer from this page:

  http://www.ffconsultancy.com/free/ray_tracer/languages.html

to Lisp. Some of them are reporting competitive performance. However, when I
try to run their programs with either CMUCL or SBCL they are two orders of
magnitude slower. Given the number of people claiming similarly good
performance, I'd like to know what the possible cause of the relative
slowdown on my computer is?

My system is an unladen 900MHz Athlon T-bird with 768Mb RAM running Debian
testing with SBCL 0.8.16 and CMUCL "19b-release-20050628-3 + minimal debian
patches". Other people have both slower and faster CPUs and more and less
RAM but all are consistently much faster than mine.

I believe SBCL always compiles to native code and I am asking CMUCL to
compile to native code with:

(compile-file "ray4.lisp")
(load "ray4.x86f")
(time (main 6.0 "image.pgm" 256.0 4.0))

Any ideas?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com

From: M Jared Finder
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <HvSdnayKD_1M8mDfRVn-ow@speakeasy.net>
Jon Harrop wrote:
> Several people have kindly ported my ray tracer from this page:
> 
>   http://www.ffconsultancy.com/free/ray_tracer/languages.html
> 
> to Lisp. Some of them are reporting competitive performance. However, when I
> try to run their programs with either CMUCL or SBCL they are two orders of
> magnitude slower. Given the number of people claiming similarly good
> performance, I'd like to know what the possible cause of the relative
> slowdown on my computer is?
> 
> My system is an unladen 900MHz Athlon T-bird with 768Mb RAM running Debian
> testing with SBCL 0.8.16 and CMUCL "19b-release-20050628-3 + minimal debian
> patches". Other people have both slower and faster CPUs and more and less
> RAM but all are consistently much faster than mine.
> 
> I believe SBCL always compiles to native code and I am asking CMUCL to
> compile to native code with:
> 
> (compile-file "ray4.lisp")
> (load "ray4.x86f")
> (time (main 6.0 "image.pgm" 256.0 4.0))
> 
> Any ideas?

Post the ported code and we can take a look at it.  Without the code, we 
can only make shots in the dark.

   -- MJF
From: ········@hotmail.com
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1123922434.033495.207950@f14g2000cwb.googlegroups.com>
A cautionary note..

Have a look over in c.l.scheme for the uproar around this topic /
poster

chris
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <42fdd7a8$0$97130$ed2619ec@ptn-nntp-reader03.plus.net>
M Jared Finder wrote:
> Post the ported code and we can take a look at it.  Without the code, we
> can only make shots in the dark.

Here is Nathan Baum's port for CMUCL and SBCL:

;; Compile everything for maximum speed: no type checks, no debug info.
(declaim (optimize (speed 3) (space 0) (debug 0) (safety 0)))

;; Sentinel distance meaning "no intersection".
(defconstant infinity most-positive-single-float)
;; Small offset used to nudge shadow-ray origins off the surface and
;; avoid self-intersection ("surface acne").
(defconstant delta (the single-float (sqrt single-float-epsilon)))

;; Inline the small vector/ray helpers so the hot path avoids call
;; overhead.  NOTE(review): VEC is listed twice; harmless but redundant.
(declaim (inline vec v* v+ v- dot unitise ray vec make-ray make-sphere x y z
ray_trace))

;; 3-D vector of single-floats.  (:conc-name nil) exposes the slot
;; accessors as plain X, Y and Z; (VEC x y z) is a positional constructor.
(defstruct (vec (:conc-name nil) (:constructor vec (x y z)))
  (x 0.0 :type single-float)
  (y 0.0 :type single-float)
  (z 0.0 :type single-float))

(defun v* (s r)
  "Return a fresh VEC equal to vector R scaled by the scalar S."
  (declare (single-float s))
  (vec (* s (x r))
       (* s (y r))
       (* s (z r))))

(defmacro defvfun (name op)
  "Define NAME as the componentwise binary vector operation built from OP."
  (let ((a (gensym "A"))
        (b (gensym "B")))
    `(defun ,name (,a ,b)
       (vec (,op (x ,a) (x ,b))
            (,op (y ,a) (y ,b))
            (,op (z ,a) (z ,b))))))

;; Componentwise vector addition and subtraction.
(defvfun v+ +)
(defvfun v- -)

(defun dot (a b)
  "Inner (dot) product of the vectors A and B."
  (+ (* (x a) (x b))
     (* (y a) (y b))
     (* (z a) (z b))))

(defun unitise (r)
  "Return the unit-length vector pointing in the direction of R."
  (let ((len (the single-float (sqrt (dot r r)))))
    (v* (/ 1.0 len) r)))

;; A ray with origin ORIG and direction DIR (DIR is unit length at all
;; call sites in this file).
(defstruct (ray (:conc-name nil))
  (orig (vec 0.0 0.0 0.0) :type vec)
  (dir (vec 0.0 0.0 0.0) :type vec))

;; Positional shorthand for MAKE-RAY.
(defun ray (orig dir) (make-ray :orig orig :dir dir))

;; A sphere with CENTER and RADIUS.
(defstruct (sphere (:conc-name nil))
  (center (vec 0.0 0.0 0.0) :type vec)
  (radius 0.0 :type single-float))

;; NOTE(review): a bare top-level SHADOW runs only at load time, not at
;; compile time, and some compilers (e.g. Allegro CL) warn about it; the
;; portable idiom would wrap it in EVAL-WHEN -- confirm intent.
(shadow 'group)
;; A bounding sphere (inherits CENTER/RADIUS) plus the objects inside it.
(defstruct (group (:conc-name nil) (:include sphere))
  (children () :type list))

(defun ray_sphere (ray sphere)
  "Distance along RAY to its first intersection with SPHERE, or
INFINITY when the ray misses or the sphere lies entirely behind it."
  (let* ((v    (v- (center sphere) (orig ray)))
         (b    (dot v (dir ray)))
         (disc (+ (- (* b b) (dot v v))
                  (* (radius sphere) (radius sphere)))))
    (if (< disc 0.0)
        infinity                         ; no real roots: clean miss
        (let* ((root (sqrt disc))
               (far  (+ b root))
               (near (- b root)))
          (cond ((< far 0.0)  infinity)  ; whole sphere behind the origin
                ((> near 0.0) near)      ; origin outside: nearer root
                (t            far))))))  ; origin inside: farther root

(defun intersect (ray scene)
  "Find the nearest intersection of RAY with SCENE.
Returns a cons (LAM . NORMAL) where LAM is the single-float distance to
the hit (INFINITY for a miss) and NORMAL is the unit surface normal at
the hit point.  SCENE is a SPHERE or a GROUP (bounding sphere whose
CHILDREN are sub-scenes)."
  (labels ((aux (hit scene)
             (destructuring-bind (lam . _) hit
               (declare (ignore _) (single-float lam))
               (etypecase scene
                 (group
                  ;; Descend into the children only when the bounding
                  ;; sphere is hit closer than the best hit so far.
                  (if (>= (ray_sphere ray scene) lam)
                      hit
                      (reduce #'aux (children scene) :initial-value hit)))
                 (sphere
                  (let ((lamt (ray_sphere ray scene)))
                    (if (>= lamt lam)
                        hit
                        (cons lamt
                              (unitise (v- (v+ (orig ray)
                                               (v* lamt (dir ray)))
                                           (center scene)))))))))))
    ;; BUG FIX: the original seeded the search with the quasiquoted form
    ;; `(,infinity . (vec 0.0 0.0 0.0)), whose cdr is the *literal list*
    ;; (VEC 0.0 0.0 0.0) -- not a VEC struct, and shared literal
    ;; structure to boot.  Build a fresh cons holding a real zero vector.
    (aux (cons infinity (vec 0.0 0.0 0.0)) scene)))

(defun ray_trace (light ray scene)
  "Lambertian intensity seen along RAY in SCENE lit from direction LIGHT.
Returns 0.0 for a miss, a back-facing surface, or a shadowed point;
otherwise the (positive) diffuse term."
  (destructuring-bind (lam . normal) (intersect ray scene)
    (declare (single-float lam))
    (when (= lam infinity)
      (return-from ray_trace 0.0))          ; ray hit nothing
    (let ((g (dot normal light)))
      (when (>= g 0.0)
        (return-from ray_trace 0.0))        ; surface faces away from light
      ;; Cast a shadow ray from just above the hit point toward the light.
      (let* ((hit-point  (v+ (v+ (orig ray) (v* lam (dir ray)))
                             (v* delta normal)))
             (shadow-lam (car (intersect (ray hit-point (v* -1.0 light))
                                         scene))))
        (declare (single-float shadow-lam))
        (if (< shadow-lam infinity)
            0.0                              ; something blocks the light
            (- g))))))

(defun create (n c r)
  "Build the recursive test scene: a sphere of radius R at C and, when
N > 1, four half-size sub-bundles placed diagonally above it, all
wrapped in a GROUP with a bounding radius of 3R.  N is the depth."
  (declare (fixnum n)
           (single-float r))
  (let ((obj (make-sphere :center c :radius r)))
    (if (= n 1)
        obj
        (let* ((rt (* 3.0 (/ r (sqrt 12.0))))
               ;; Child bundle offset horizontally by (DX, DZ), raised by RT.
               (child (lambda (dx dz)
                        (create (1- n) (v+ c (vec dx rt dz)) (/ r 2.0)))))
          (make-group
           :center c
           :radius (* 3.0 r)
           :children (cons obj
                           (mapcar child
                                   (list (- rt) rt (- rt) rt)
                                   (list (- rt) (- rt) rt rt))))))))

;; Render the scene to FILE-NAME as a binary PGM (P5) image.
;; LEVEL is the sphere-recursion depth, N the image width/height in
;; pixels, and SS the supersampling factor (SS*SS rays per pixel).
(defun main (level file-name n ss)
  (declare (fixnum level n ss))
  (let ((scene (create level (vec 0.0 -1.0 0.0) 1.0))
        (light (unitise (vec -1.0 -3.0 2.0)))
        ;; Pixel coordinates span [-n/2, n/2).
        (-n/2 (- (/ (float n) 2.0)))
        (1-n/2 (1- (/ (float n) 2.0))))
    ;; Write the ASCII PGM header first...
    (with-open-file (s
file-name :if-exists :supersede :if-does-not-exist :create :direction :output)
      (format s "P5~%~A ~A~%255~%" n n))
    ;; ...then reopen the file in byte mode and append the raster data.
    (with-open-file (s file-name :element-type '(unsigned-byte
8) :if-exists :append :direction :output)
      ;; Scan rows top to bottom; PRINT is a crude progress meter.
      (loop for y of-type single-float from 1-n/2 downto -n/2
            ;;do (sb-ext:gc-off)
            do (print y)
            do (loop for x of-type single-float from -n/2 to 1-n/2
            do (let ((g 0.0))
                 (declare (single-float g))
                 ;; Accumulate SS*SS sub-pixel samples.
                 (loop for dx of-type single-float from x below (1+ x) by (/
1.0 ss)
                       do (loop for dy of-type single-float from y below (1+
y) by (/ 1.0 ss)
                                do (let ((d (unitise (vec dx dy (float
n)))))
                                     (incf g (ray_trace light (ray (vec 0.0
0.0 -4.0) d) scene)))))
                 ;; Average, scale to 0..255, and round to the nearest byte.
                 (let ((g (+ 0.5 (* 255.0 (/ g (* (float ss) (float
ss)))))))
                   (write-byte (floor g) s))))))))

;; Raise SBCL's GC threshold so collections happen less often during the
;; render (the inner loop conses heavily).
#+sbcl (setf (sb-ext:BYTES-CONSED-BETWEEN-GCS) 100000000)

;; Render a depth-6 scene at 160x160 with 4x4 supersampling, timing it.
(time (main 6 "image.pgm" 160 4))
(quit)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Richard Fateman
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <iBoLe.4909$O07.3677@newssvr23.news.prodigy.net>
Time for running in Allegro CL 7.0 on a 933 MHz Pentium II.
as given:

; cpu time (non-gc) 60,202 msec (00:01:00.202) user, 421 msec system
; cpu time (gc)     33,623 msec user, 10 msec system
; cpu time (total)  93,825 msec (00:01:33.825) user, 431 msec system
; real time  105,632 msec (00:01:45.632)
; space allocation:
;  967,240 cons cells, 3,719,472,272 other bytes, 4,472 static bytes


But converting all "single-float"  to double-float:

; cpu time (non-gc) 4,446 msec user, 120 msec system
; cpu time (gc)     2,524 msec user, 0 msec system
; cpu time (total)  6,970 msec user, 120 msec system
; real time  7,961 msec
; space allocation:
;  418,422 cons cells, 292,782,544 other bytes, 0 static bytes
The compiler complained about the "shadow" statement
warning: compile-file found "SHADOW" at the top-level --  see the
          documentation for
          comp:*cltl1-compile-file-toplevel-compatibility-p*

My guess is that much of the verbosity for the sbcl version could be struck
out of the allegro cl version without any loss in speed, and that careful attention
to other potential optimizations/ declarations could squeeze out better performance.

It could be that this single/double issue relates to your AMD64 timings.
Most of it may be converting single to double and back.

RJF
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <42fe2d62$0$97136$ed2619ec@ptn-nntp-reader03.plus.net>
Richard Fateman wrote:
> Time for running in Allegro CL 7.0 on a 933 MHz Pentium II.
> as given:
> 
> ; cpu time (non-gc) 60,202 msec (00:01:00.202) user, 421 msec system
> ; cpu time (gc)     33,623 msec user, 10 msec system
> ; cpu time (total)  93,825 msec (00:01:33.825) user, 431 msec system
> ; real time  105,632 msec (00:01:45.632)
> ; space allocation:
> ;  967,240 cons cells, 3,719,472,272 other bytes, 4,472 static bytes

These times agree with my own.

> But converting all "single-float"  to double-float:
> 
> ; cpu time (non-gc) 4,446 msec user, 120 msec system
> ; cpu time (gc)     2,524 msec user, 0 msec system
> ; cpu time (total)  6,970 msec user, 120 msec system
> ; real time  7,961 msec
> ; space allocation:
> ;  418,422 cons cells, 292,782,544 other bytes, 0 static bytes
> The compiler complained about the "shadow" statement
> warning: compile-file found "SHADOW" at the top-level --  see the
>           documentation for
>           comp:*cltl1-compile-file-toplevel-compatibility-p*

Very interesting - thanks for that. Unfortunately replacing single-float
with double-float causes SBCL to spew out a lot of error messages. I'll see
if I can figure out why and I'll try CMUCL.

> My guess is that much of the verbosity for the sbcl version could be
> struck out of the allegro cl version without any loss in speed, and that
> careful attention to other potential optimizations/ declarations could
> squeeze out better performance.

Right. Is Allegro CL free?

> It could be that this single/double issue relates to your AMD64 timings.
> Most of it may be converting single to double and back.

Yes. I've no idea what CL says about coercions. The other languages may well
be much more lax in this respect.

It is interesting that single vs double precision has such bizarre and (for
me) unexpected performance implications...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Harald Hanche-Olsen
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <pcoy875mzjz.fsf@shuttle.math.ntnu.no>
+ Jon Harrop <······@jdh30.plus.com>:

| Right. Is Allegro CL free?

It has a free trial version.  <http://www.franz.com/>.

-- 
* Harald Hanche-Olsen     <URL:http://www.math.ntnu.no/~hanche/>
- Debating gives most of us much more psychological satisfaction
  than thinking does: but it deprives us of whatever chance there is
  of getting closer to the truth.  -- C.P. Snow
From: Juho Snellman
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <slrndftqok.289.jsnell@sbz-31.cs.Helsinki.FI>
<·······@cs.berkeley.edu> wrote:
> ;  967,240 cons cells, 3,719,472,272 other bytes, 4,472 static bytes
[...]
> ;  418,422 cons cells, 292,782,544 other bytes, 0 static bytes

Cute. I presume Allegro can completely optimize away the allocation of
temporary structs consisting of only double-floats, but doesn't do
this for single-floats?

-- 
Juho Snellman
"Premature profiling is the root of all evil."
From: Juho Snellman
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <slrndfsf3v.p65.jsnell@sbz-31.cs.Helsinki.FI>
<······@jdh30.plus.com> wrote:
>>> My system is an unladen 900MHz Athlon T-bird with 768Mb RAM running Debian
>>> testing with SBCL 0.8.16 and CMUCL "19b-release-20050628-3 + minimal
>>> debian patches".

That's a rather ancient version of SBCL, you might want to upgrade.
For example:

> (defstruct (vec (:conc-name nil) (:constructor vec (x y z)))
>   (x 0.0 :type single-float)
>   (y 0.0 :type single-float)
>   (z 0.0 :type single-float))

This is using about 2x the memory of what you'd expect for each
instance, and doing an extra memory indirection for each slot access.
Proper support for storing the floats "raw" in the struct was added in
0.9.2 by David Lichteblau.

Another possible pitfall on older SBCLs (<0.8.21) is that they don't
honor the compiler policy for code entered on the repl, but compile it
with low speed, high debug/safety. If you've been pasting the code
into the repl instead of LOADing it, performance would indeed be
horrible. 

-- 
Juho Snellman
"Premature profiling is the root of all evil."
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <42fe56e8$0$97109$ed2619ec@ptn-nntp-reader03.plus.net>
Juho Snellman wrote:
> <······@jdh30.plus.com> wrote:
>>>> My system is an unladen 900MHz Athlon T-bird with 768Mb RAM running
>>>> Debian testing with SBCL 0.8.16 and CMUCL "19b-release-20050628-3 +
>>>> minimal debian patches".
> 
> That's a rather ancient version of SBCL, you might want to upgrade.

Debian unstable has 0.9.3. It's upgrading now... :-)

> For example:
> 
>> (defstruct (vec (:conc-name nil) (:constructor vec (x y z)))
>>   (x 0.0 :type single-float)
>>   (y 0.0 :type single-float)
>>   (z 0.0 :type single-float))
> 
> This is using about 2x the memory of what you'd expect for each
> instance, and doing an extra memory indirection for each slot access.
> Proper support for storing the floats "raw" in the struct was added in
> 0.9.2 by David Lichteblau.

I see.

> Another possible pitfall on older SBCLs (<0.8.21) is that they don't
> honor the compiler policy for code entered on the repl, but compile it
> with low speed, high debug/safety. If you've been pasting the code
> into the repl instead of LOADing it, performance would indeed be
> horrible.

Aha! Yes indeed. I just tried (load (compile-file "...")) and it runs in
only 20secs compared to ~250secs from the top-level and 2.5secs for C++.

Thanks for the help. I'll repost when I get results with the new compiler.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Raffael Cavallaro
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <2005081321013216807%raffaelcavallaro@pasdespamsilvousplaitmaccom>
On 2005-08-13 07:18:40 -0400, Jon Harrop <······@jdh30.plus.com> said:

> Here is Nathan Baum's port for CMUCL and SBCL:

just as an additional data point, this code runs in just over 6 seconds 
in sbcl 0.9.3 on a dual 2.0 GHz G5 (though sbcl only uses one 
processor). 
From: Svenne Krap
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <42ff04a9$0$78285$157c6196@dreader1.cybercity.dk>
Raffael Cavallaro wrote:
> On 2005-08-13 07:18:40 -0400, Jon Harrop <······@jdh30.plus.com> said:
> 
>> Here is Nathan Baum's port for CMUCL and SBCL:
> 
> 
> just as an additional data point, this code runs in just over 6 seconds 
> in sbcl 0.9.3 on a dual 2.0 GHz G5 (though sbcl only uses one processor).

On a single Xeon 3.4 GHz with sbcl 0.9.3:

Evaluation took:
   3.783 seconds of real time
   3.305498 seconds of user run time
   0.477927 seconds of system run time
   0 page faults and
   509,576,768 bytes consed
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <42ff1c6e$0$1279$ed2619ec@ptn-nntp-reader02.plus.net>
Svenne Krap wrote:
> Raffael Cavallaro wrote:
>> just as an additional data point, this code runs in just over 6 seconds
>> in sbcl 0.9.3 on a dual 2.0 GHz G5 (though sbcl only uses one processor).
> 
> On a single Xeon 3.4 GHz with sbcl 0.9.3:
> 
> Evaluation took:
>    3.783 seconds of real time
>    3.305498 seconds of user run time
>    0.477927 seconds of system run time
>    0 page faults and
>    509,576,768 bytes consed

On my 1.8GHz AMD64 in 32-bit mode with SBCL 0.9.3 I'm now getting:

    5.21 seconds of real time
    4.15 seconds of user run time
    0.62 seconds of system run time
    0 page faults and
    509,569,248 bytes consed

This seems to be on-par with other people's observations.

This compares to 1.037s for OCaml and 0.987s for C++, so SBCL is now much
more competitive.

Thanks for all the help!

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3m95opF15sleiU1@individual.net>
Jon Harrop wrote:
> On my 1.8GHz AMD64 in 32-bit mode with SBCL 0.9.3 I'm now getting:
> 
>     5.21 seconds of real time
>     4.15 seconds of user run time
>     0.62 seconds of system run time
>     0 page faults and
>     509,569,248 bytes consed
> 
> This seems to be on-par with other people's observations.
> 
> This compares to 1.037s for OCaml and 0.987s for C++, so SBCL is now much
> more competitive.

I wouldn't consider 5 times as slow as a *functional* language very 
competitive, but it might be fast enough for many problems.

-- 
I believe in Karma.  That means I can do bad things to people
all day long and I assume they deserve it.
	Dogbert
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <42ff662a$0$97128$ed2619ec@ptn-nntp-reader03.plus.net>
Ulrich Hobelmann wrote:
> I wouldn't consider 5 times as slow as a *functional* language very
> competitive, but it might be fast enough for many problems.

Well, it's relative. Most of the other Lisp/Scheme implementations were two
orders of magnitude slower. Stalin gets even closer than SBCL.

Also, MLton often beats g++, so functional languages aren't slow coaches any
more...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3mznksdx9.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Ulrich Hobelmann wrote:
> > I wouldn't consider 5 times as slow as a *functional* language very
> > competitive, but it might be fast enough for many problems.
> 
> Well, it's relative. Most of the other Lisp/Scheme implementations were two
> orders of magnitude slower. Stalin gets even closer than SBCL.

Which Lisps are you talking about?  We've already seen where Allegro
is faster than this SBCL timing and it hadn't even been optimized yet.
I would be surprised if Lispworks were much different in this regard
as well.  

> Also, MLton often beats g++, so functional languages aren't slow coaches any
> more...

So do many CL implementations on many benchmarks when "properly"
written.  Several have been shown here in the past.  Typically this
starts with the original posting "showing" how bad CL is supposed to
be when comparing optimized C/C++ with naively written CL.  It also
often ends with the CL version beating the optimized C/C++ version.

Most typical of all is that such benchmarks (including this ray
tracing thing) don't have much of anything interesting to say about
anything.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <42ff70f5$0$1314$ed2619ec@ptn-nntp-reader02.plus.net>
jayessay wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> Ulrich Hobelmann wrote:
>> > I wouldn't consider 5 times as slow as a *functional* language very
>> > competitive, but it might be fast enough for many problems.
>> 
>> Well, it's relative. Most of the other Lisp/Scheme implementations were
>> two orders of magnitude slower. Stalin gets even closer than SBCL.
> 
> Which Lisps are you talking about?

Primarily CMUCL and SBCL.

> We've already seen where Allegro 
> is faster than this SBCL timing and it hadn't even been optimized yet.
> I would be surprised if Lispworks were much different in this regard
> as well.

Is Lispworks free?

>> Also, MLton often beats g++, so functional languages aren't slow coaches
>> any more...
> 
> So do many CL implementations on many benchmarks when "properly"
> written.  Several have been shown here in the past.

What kinds of tasks is Lisp best at, in terms of performance? I Googled for
information on this but most of the sites I found were no longer up.

> Typically this 
> starts with the original posting "showing" how bad CL is supposed to
> be when comparing optimized C/C++ with naively written CL.  It also
> often ends with the CL version beating the optimized C/C++ version.

Can you point me to some examples of this? I heard of a benchmark written
long ago where some Lisp gurus managed to code an equivalently-efficient
implementation in Lisp. However, it is important to know how easily an
efficient version can be written. LOC is a very rudimentary measure of
development time.

> Most typical of all is that such benchmarks (including this ray
> tracing thing) don't have much of anything interesting to say about
> anything.

I think my conclusions were interesting. In particular, I was surprised to
see modern functional language implementations doing so well at what is
arguably their weakest point.

I'd like to do another benchmark with an example from scientific computing
next...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3m9bqeF13q7jmU1@individual.net>
Jon Harrop wrote:
> What kinds of tasks is Lisp best at, in terms of performance? I Googled for
> information on this but most of the sites I found were no longer up.

Why performance at all?  Lisp is good at many things, most notably good 
error recovery (interactive debugger, restarts...), but not for 
high-performance computing.  There you probably want Fortran or C (and 
maybe link them to Lisp).

For symbolic processing, or anything non-number-crunchy I wouldn't be 
surprised if an application written in Lisp (compiled) isn't a bit 
slower than the same app written in C++ or Java.  But of course nobody 
writes an app in several languages...

-- 
I believe in Karma.  That means I can do bad things to people
all day long and I assume they deserve it.
	Dogbert
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <42ff7f92$0$17484$ed2e19e4@ptn-nntp-reader04.plus.net>
Ulrich Hobelmann wrote:
> Jon Harrop wrote:
>> What kinds of tasks is Lisp best at, in terms of performance? I Googled
>> for information on this but most of the sites I found were no longer up.
> 
> Why performance at all?

I became interested in Lisp's performance because several people advocated
Lisp to me for these kinds of tasks, claiming that it was suitably
efficient. I wanted to test that.

> Lisp is good at many things, most notably good 
> error recovery (interactive debugger, restarts...), but not for
> high-performance computing.  There you probably want Fortran or C (and
> maybe link them to Lisp).

My background is in computational science. Fortran is fine for trivial
programs that just loop over arrays of floats. Mathematica is great for
symbolic computation. But there is a huge gap between those where Fortran
isn't expressive enough and Mathematica isn't efficient enough. Languages
like OCaml, SML, Haskell and Lisp fill that gap.

> For symbolic processing, or anything non-number-chrunchy I wouldn't be
> surprised if an application written in Lisp (compiled) isn't a bit
> slower than the same app written in C++ or Java.  But of course nobody
> writes an app in several languages...

I think it is productive to choose suitable tasks and implement them in
several different languages. It helps other people to learn, e.g. by
comparing C++ code to the equivalent OCaml, and it gives us all an idea of
how efficient and expressive the different languages are.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Luke J Crook
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <HIadne8pK8ustZ3eRVn-hQ@giganews.com>
Jon Harrop wrote:
> jayessay wrote:
>>We've already seen where Allegro 
>>is faster than this SBCL timing and it hadn't even been optimized yet.
>>I would be surprised if Lispworks were much different in this regard
>>as well.
> 
> 
> Is Lispworks free?

How many implementations of Ocaml are there? One. So every developer 
working on Ocaml is hammering that one version. If you want to compare 
Ocaml to an open source language then choose a language for which there 
is but a single implementation; Perl, Python, Parrot, PHP, Ruby etc.

If you want to compare Ocaml to a specific implementation of Lisp, then 
target CMUCL, SBCL, CLISP, Lispworks etc. However, if you want to 
compare the performance of Lisp to the performance of Ocaml then choose 
the fastest implementation of Lisp available and run that. Allegro, 
Lispworks and Corman (Corman is Windows only) are all free to you for 
this purpose.

-Luke
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43006c90$0$1220$ed2619ec@ptn-nntp-reader01.plus.net>
Luke J Crook wrote:
> How many implementations of Ocaml are there? One. So every developer
> working on Ocaml is hammering that one version. If you want to compare
> Ocaml to an open source language then choose a language for which there
> is but a single implementation; Perl, Python, Parrot, PHP, Ruby etc.

That doesn't really make any sense.

Firstly, the language comparison is with C++, Java and SML as well, all of
which have multiple implementations. Indeed, two implementations of SML are
already on the language comparison. Secondly, of the languages you listed,
at least Perl and Python have multiple implementations. Finally, the number
of implementations is irrelevant.

> If you want to compare Ocaml to a specific implementation of Lisp, then
> target CMUCL, SBCL, CLISP, Lispworks etc. However, if you want to
> compare the performance of Lisp to the performance of Ocaml then choose
> the fastest implementation of Lisp available and run that. Allego,
> Lispworks and Corman (Corman is Windows only) are all free to you for
> this purpose.

We're Linux only, so Corman is out the window. Which of the other Lisp
implementations do you expect to do well at this task?

I'll have a look at Lispworks...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Jens Axel Søgaard
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <42ff9364$0$657$edfadb0f@dread12.news.tele.dk>
jayessay wrote:
> Jon Harrop <······@jdh30.plus.com> writes:

>>Well, it's relative. Most of the other Lisp/Scheme implementations were two
>>orders of magnitude slower. Stalin gets even closer than SBCL.

> So do many CL implementations on many benchmarks when "properly"
> written.  Several have been shown here in the past.  Typically this
> starts with the original posting "showing" how bad CL is supposed to
> be when comparing optimized C/C++ with naively written CL.  It also
> often ends with the CL version beating the optimized C/C++ version.
> 
> Most typical of all is that such benchmarks (including this ray
> tracing thing) don't have much of anything interesting to say about
> anything.

The comp.lang.lisp thread on Almabench comes into mind.
Rif summed it up pretty nicely:

     So what have we learned?  We confirmed what we pretty much
     knew: you can write a C program in CL, at which point the
     relative speed of your C and CL versions will depend on the
     relative quality of the code generation.

I am sure Jon can pick up some nice tricks in the thread.

<http://groups.google.com/group/comp.lang.lisp/msg/ae59ca28867ced78?hl=en&lr=&ie=UTF-8&rnum=110>

-- 
Jens Axel Søgaard
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <42ffd8a4$0$97142$ed2619ec@ptn-nntp-reader03.plus.net>
Jens Axel Søgaard wrote:
> The comp.lang.lisp thread on Almabench comes into mind.
> Rif summed it up pretty nicely:

Almabench. That's the one I'd heard of before.

>      So what have we learned?  We confirmed what we pretty much
>      knew: you can write a C program in CL, at which point the
>      relative speed of your C and CL versions will depend on the
>      relative quality of the code generation.
> 
> I am sure Jon can pick up some nice tricks in the thread.
> 
>
<http://groups.google.com/group/comp.lang.lisp/msg/ae59ca28867ced78?hl=en&lr=&ie=UTF-8&rnum=110>

I hadn't actually read the implementation before now but I've got to say
that my ray tracer is a whole lot more fun. :-)

I'll check out the thread in more detail - thanks for the link.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Förster vom Silberwald
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124107965.549795.269900@g43g2000cwa.googlegroups.com>
Jon Harrop wrote:
> Jens Axel Søgaard wrote:
> > The comp.lang.lisp thread on Almabench comes into mind.
> > Rif summed it up pretty nicely:
>
> Almabench. That's the one I'd heard of before.

Only for the record: google around in comp.lang.scheme and see my
version for Bigloo of the Almabench if you are interested.

Schneewittchen
From: Förster vom Silberwald
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124107691.257338.72640@g14g2000cwa.googlegroups.com>
Jon Harrop wrote:
> Ulrich Hobelmann wrote:
> > I wouldn't consider 5 times as slow as a *functional* language very
> > competitive, but it might be fast enough for many problems.
>
> Well, it's relative. Most of the other Lisp/Scheme implementations were two
> orders of magnitude slower. Stalin gets even closer than SBCL.
>
> Also, MLton often beats g++, so functional languages aren't slow coaches any
> more...

Jon: You may not forget that you are testing for micro benchmarks.
Surely, a Python will never become faster in the long run. However, all
the things will change if we were benchmarking really complicated code
of many thousand of lines.

When I was younger (now I am 31) I also believed benchmarking is a
must. However, in the meantime it is even this: if your ray-tracer in
OCaml were 100000 times faster than the Bigloo-Scheme version I would
not opt for OCaml quickly. Performance is important, no question, but
in a code project it is a rather tiny part.


Schneewittchen
From: Michael Sullivan
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1h1bxgc.rjim7q12zaoegN%use-reply-to@spambegone.null>
Förster vom Silberwald <··········@hotmail.com> wrote:

> When I was younger (now I am 31) I also believed benchmarking is a
> must. However, in the meantime it is even this: if your ray-tracer in
> OCaml were 100000 times faster than the Bigloo-Scheme version I would
> not opt for OCaml quickly. Perfromance is important, no question, but
> in a code project it is a rather tiny part.

Depends on the code project, doesn't it?  If you're doing primarily
heavy duty (but medium complexity) mathematical manipulations, then
speed is a big factor.

I agree that in most problem domains, constant time speed factors are
pretty much irrelevant for 95% of code, and we should be very skeptical
about judging "language speed" by simple benchmarks like this, because
an easier to program language, often leads to better algorithms, as well
as a lot less programming time (and less downside variation in program
speed -- cf: Erann's lisp-java-c experiment: the fastest C programs just
beat the fastest lisp programs, but many of the C programs were *much*
slower than the slowest lisp programs).     

But those tradeoffs aren't the same for all problem domains.  In those
rare problem domains where there's always lots of time to optimize relative
to the runtime available, bare speed matters.

The problem is that it's still hard to tell the difference between a
faster language and better writers of optimized code, as the almabench
thread demonstrates.  How much time do you spend on optimization before
you abandon the project and say "It's easier to make this fast in X".
And obviously some things will be easier to optimize in language Y than
others.  All this makes any given benchmark or set of benchmarks suspect
as an absolute measure of speed, but it doesn't completely invalidate
their potential usefulness, as long as one understands what they mean
and what they do not mean. 


Michael
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4300fc41$0$17460$ed2e19e4@ptn-nntp-reader04.plus.net>
Michael Sullivan wrote:
> Förster vom Silberwald <··········@hotmail.com> wrote:
>> When I was younger (now I am 31) I also believed benchmarking is a
>> must. However, in the meantime it is even this: if your ray-tracer in
>> OCaml were 100000 times faster than the Bigloo-Scheme version I would
>> not opt for OCaml quickly. Performance is important, no question, but
>> in a code project it is a rather tiny part.
> 
> Depends on the code project, doesn't it?  If you're doing primarily
> heavy duty (but medium complexity) mathematical manipulations, then
> speed is a big factor.

Yes, absolutely. I am primarily interested in the middle ground between slow
but hugely expressive languages like Mathematica and "fast" but archaic
languages like Fortran. Functional languages are great for this middle
ground and they are improving at such a rate that they've been eating into
the remits of Mathematica and Fortran a lot.

> I agree that in most problem domains, constant time speed factors are
> pretty much irrelevant for 95% of code, and we should be very skeptical
> about judging "language speed" by simple benchmarks like this, because
> an easier to program language, often leads to better algorithms, as well
> as a lot less programming time (and less downside variation in program
> speed -- cf: Erann's lisp-java-c experiment: the fastest C programs just
> beat the fastest lisp programs, but many of the C programs were *much*
> slower than the slowest lisp programs).

This is exactly why I measure LOC. From my results, the LOC measurements
indicate that code size will become a limiting factor much more quickly for
Java than for OCaml, for example. Although this is a very crude measure
(there are a huge number of language features, like a decent type system,
that aid productivity), my quantitative results reflect my experience.

However, LOC overly penalises Lisp and Scheme, IMHO. Specifically, Lisp and
Scheme programs are virtually unreadable unless the parentheses are
staggered by spreading expressions over several lines and using an
automatic indenter. So if I were to put a Lisp implementation of the ray
tracer on my site then I'd either state that, or I'd give results using
some other measure of verbosity, like characters. I do think Lisp deserves
to be somewhat penalised in this way, but LOC goes too far.

> But those tradeoffs aren't the same for all problem domains.  In those
> rare problem domains where there's always lots of time to optimize relative
> to the runtime available, bare speed matters.

I think it is also important to measure the performance of "natural" code.
In Lisp, natural code is often understood very generically by the compiler.
In most other languages, you have to work to get that generality.

> The problem is that it's still hard to tell the difference between a
> faster language and better writers of optimized code, as the almabench
> thread demonstrates.  How much time do you spend on optimization before
> you abandon the project and say "It's easier to make this fast in X".
> And obviously some things will be easier to optimize in language Y than
> others.  All this makes any given benchmark or set of benchmarks suspect
> as an absolute measure of speed, but it doesn't completely invalidate
> their potential usefulness, as long as one understands what they mean
> and what they do not mean.

Yes, absolutely. I do think my ray tracer is a suboptimal benchmark, of
course, but I was astonished that so many people put so much effort into
that almabench benchmark when (IMHO) it was an appallingly narrow and badly
coded test that isn't representative of any real programs, in my experience
as a computational physicist. I'm glad that this hasn't descended into a
"Lisp sux0rz" conversation though.

In contrast, my ray tracer involves integer and floating point arithmetic,
3D vectors, trees, tagged unions, recursion/iteration, naturally functional
(e.g. vector ops and ray_sphere) and imperative (e.g. for loops) style. So
it tests quite a lot using very little code and it is fun to play with.
This diversity also makes the collection of ray tracers a better
educational tool - people can glance at the code and see how different
things are implemented in different languages.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Hartmann Schaffer
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <WqfMe.1588$Dd.6727@newscontent-01.sprint.ca>
Jon Harrop wrote:
> ...
> However, LOC overly penalises Lisp and Scheme, IMHO. Specifically, Lisp and
> Scheme programs are virtually unreadable unless the parentheses are
> staggered by spreading expressions over several lines and using an
> automatic indenter. So if I were to put a Lisp implementation of the ray
> tracer on my site then I'd either state that, or I'd give results using
> some other measure of verbosity, like characters.

i doubt lisp or scheme will gain anything there:  the language defined 
words tend to be quite lengthy, and afaict that seems to encourage 
programmers to use pretty lengthy identifiers for their own identifiers, 
so character count might penalize lisp even worse.  otoh, the lengthy 
identifiers make lisp code quite easy to read and understand.

token count probably would be better

> ...

hs
From: Jamie Border
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <ddsa0j$fk6$1@nwrdmz02.dmz.ncs.ea.ibs-infra.bt.com>
"Hartmann Schaffer" <··@hartmann.schaffernet> wrote in message 
·······················@newscontent-01.sprint.ca...
> Jon Harrop wrote:
>> ...
JH>> However, LOC overly penalises Lisp and Scheme, IMHO. Specifically, Lisp 
and
JH>> Scheme programs are virtually unreadable unless the parentheses are
JH>> staggered by spreading expressions over several lines and using an
JH>> automatic indenter. So if I were to put a Lisp implementation of the 
ray
JH>> tracer on my site then I'd either state that, or I'd give results using
JH>> some other measure of verbosity, like characters.

Hmm.  What would you (JH) be measuring here?

a) Keystrokes required to produce the code (see below, though)
b) Some kind of 'intrinsic verbosity', which would require some *serious* 
thinking about idiomaticity, relevance of formatting and massive, massive 
sampling to make it statistically relevant.


> i doubt lisp or scheme will gain anything there:  the language defined 
> words tend to be quite lengthy, and afaict that seems to encourage 
> programmers to use pretty length identifiers for their own identifiers,

Yes

> so character count might penalize lisp even worse.  otoh, the lengthy 
> identifiers make lisp code quite easy to read and understand.

Yes, and using a decent editor with auto-completion (Emacs) means that I hit 
less keys to produce the token 'DESTRUCTURING-BIND' ( DE-B <META-TAB> ) than 
you might think.

Oh, and all the ')))))' you see probably didn't get typed by hand ( 
<META-RET> closes all open parens).

>
> token count probably would be better

Yep, although (because I am biased) I would like to see 
'keystroke/mouse-click' count instead.  I think that with the requirement 
for idiomatic variable naming, CL might not come out as 'verbose' as you 
think...

>
>> ...
>
> hs 
From: Jamie Border
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <ddsmbr$emv$1@nwrdmz03.dmz.ncs.ea.ibs-infra.bt.com>
"Jamie Border" <·····@jborder.com> wrote in message 
·················@nwrdmz02.dmz.ncs.ea.ibs-infra.bt.com...
>
> "Hartmann Schaffer" <··@hartmann.schaffernet> wrote in message 
> ·······················@newscontent-01.sprint.ca...
>> Jon Harrop wrote:
>>> ...
> JH>> However, LOC overly penalises Lisp and Scheme, IMHO. Specifically, 
> Lisp and
> JH>> Scheme programs are virtually unreadable unless the parentheses are
> JH>> staggered by spreading expressions over several lines and using an
> JH>> automatic indenter. So if I were to put a Lisp implementation of the 
> ray
> JH>> tracer on my site then I'd either state that, or I'd give results 
> using
> JH>> some other measure of verbosity, like characters.
>
> Hmm.  What would you (JH) be measuring here?
>
> a) Keystrokes required to produce the code (see below, though)
> b) Some kind of 'intrinsic verbosity', which would require some *serious* 
> thinking about idiomaticity, relevance of formatting and massive, massive 
> sampling to make it statistically relevant.
>
>
>> i doubt lisp or scheme will gain anything there:  the language defined 
>> words tend to be quite lengthy, and afaict that seems to encourage 
>> programmers to use pretty length identifiers for their own identifiers,
>
> Yes
>
>> so character count might penalize lisp even worse.  otoh, the lengthy 
>> identifiers make lisp code quite easy to read and understand.
>
> Yes, and using a decent editor with auto-completion (Emacs) means that I 
> hit less keys to produce the token 'DESTRUCTURING-BIND' ( DE-B 
> <META-TAB> ) than you might think.
>
> Oh, and all the ')))))' you see probably didn't get typed by hand ( 
> <META-RET> closes all open parens).

^^^^^^^^^^^^ at least at the slime REPL anyway <oops>

>
>>
>> token count probably would be better
>
> Yep, although (because I am biased) I would like to see 
> 'keystroke/mouse-click' count instead.  I think that with the requirement 
> for idiomatic variable naming, CL might not come out as 'verbose' as you 
> think...
>
>>
>>> ...
>>
>> hs
>
> 
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43021724$0$97107$ed2619ec@ptn-nntp-reader03.plus.net>
Jamie Border wrote:
> "Hartmann Schaffer" <··@hartmann.schaffernet> wrote in message
> ·······················@newscontent-01.sprint.ca...
>> Jon Harrop wrote:
>>> ...
> JH>> However, LOC overly penalises Lisp and Scheme, IMHO. Specifically,
> Lisp and
> JH>> Scheme programs are virtually unreadable unless the parentheses are
> JH>> staggered by spreading expressions over several lines and using an
> JH>> automatic indenter. So if I were to put a Lisp implementation of the
> ray
> JH>> tracer on my site then I'd either state that, or I'd give results
> using JH>> some other measure of verbosity, like characters.
> 
> Hmm.  What would you (JH) be measuring here?
> 
> a) Keystrokes required to produce the code (see below, though)
> b) Some kind of 'intrinsic verbosity', which would require some *serious*
> thinking about idiomaticity, relevance of formatting and massive, massive
> sampling to make it statistically relevant.

Both. As you say, it is so inherently flawed that there is little point
wasting time thinking about it. For the time being, I don't believe LOC can
be significantly improved upon.

>> so character count might penalize lisp even worse.  otoh, the lengthy
>> identifiers make lisp code quite easy to read and understand.
> 
> Yes, and using a decent editor with auto-completion (Emacs) means that I
> hit less keys to produce the token 'DESTRUCTURING-BIND' ( DE-B <META-TAB>
> ) than you might think.
> 
> Oh, and all the ')))))' you see probably didn't get typed by hand (
> <META-RET> closes all open parens).

Cool. :-)

>> token count probably would be better
> 
> Yep, although (because I am biased) I would like to see
> 'keystroke/mouse-click' count instead.  I think that with the requirement
> for idiomatic variable naming, CL might not come out as 'verbose' as you
> think...

Is Lisp code not made less maintainable because of all those brackets?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3acjh7phy.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> >> token count probably would be better
> > 
> > Yep, although (because I am biased) I would like to see
> > 'keystroke/mouse-click' count instead.  I think that with the requirement
> > for idiomatic variable naming, CL might not come out as 'verbose' as you
> > think...
> 
> Is Lisp code not made less maintainable because of all those brackets?

Less???  LOL!


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Christophe Rhodes
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <sqd5odbz6d.fsf@cam.ac.uk>
Jon Harrop <······@jdh30.plus.com> writes:

> Jamie Border wrote:
>> Yep, although (because I am biased) I would like to see
>> 'keystroke/mouse-click' count instead.  I think that with the requirement
>> for idiomatic variable naming, CL might not come out as 'verbose' as you
>> think...
>
> Is Lisp code not made less maintainable because of all those brackets?

No, it is made more maintainable because of all those brackets,
because it is straightforward to write tools which can manipulate the
textual representation of your program, and because human programmers
do not read the brackets.

Christophe
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <slx9ad05.fsf@ccs.neu.edu>
> Jon Harrop <······@jdh30.plus.com> writes:
>>
>> Is Lisp code not made less maintainable because of all those brackets?

Christophe Rhodes <·····@cam.ac.uk> writes:

> No, it is made more maintainable because of all those brackets,


> Jon Harrop <······@jdh30.plus.com> writes:
>
> Consider the example:
> 
> (defun fib (x)
>           (if (<= x 2)
>               1 
>               (+ (fib (- x 2))(fib (1- x)))))
> 
> In ML this is:
> 
> let fib x = if x<=2 then 1 else fib(x-2) + fib(x-1)
> 
> That may be easier to parse for the machine (I don't think it is though) but
> maintainability is about how easily a human can parse it.


> "Rob Thorpe" <·············@antenova.com>
>
> Some people find it readable, some don't.  I personally don't find it
> very easy or very difficult.

> Brian Downing <·············@lavos.net>
>
> Also, I consider the reformatted Lisp to be more readable than the ML,
> but then, it's what I'm used to.


Is readability simply a subjective measure, then?  If so, and if
maintainability is about how easily a human can parse it, then
maintainability is also a subjective measure (and not particularly
interesting for comparing computer languages).

It seems likely to me that languages that require complex parsers are
harder for humans to understand as well.
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43026d88$0$17486$ed2e19e4@ptn-nntp-reader04.plus.net>
Joe Marshall wrote:
> Is readability simply a subjective measure, then?  If so, and if
> maintainability is about how easily a human can parse it, then
> maintainability is also a subjective measure (and not particularly
> interesting for comparing computer languages).

Yes. Readability and maintainability are inherently subjective. However,
they are both very important when comparing computer languages.

For example, nobody in their right mind would consider writing production
code in Whitespace or Brainf*** because they are clearly less readable and
maintainable, even though it is subjective. That is a clear-cut case, but
with Lisp vs ML it is not so simple, IMHO.

> It seems likely to me that languages that require complex parsers are
> harder for humans to understand as well.

There is unquestionably a huge amount of evidence to the contrary. Most
natural and programming languages have complicated grammars precisely
because it simplifies their use and makes them easier to understand.

Additionally (pun intended), we were all taught operator precedences in
conventional mathematics at a very young age. It seems at best odd and at
worst stupid to disregard this.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Tayssir John Gabbour
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124238999.593696.42790@g49g2000cwa.googlegroups.com>
Jon Harrop wrote:
> Additionally (pun intended), we were all taught operator precedences in
> conventional mathematics at a very young age. It seems at best odd and at
> worst stupid to disregard this.

I hope there's little disregarding or idiocy going on. Many Lisp users
use other languages regularly, so it's understandably hard for Lisp's
unusual syntax to be lost on them.

I recall that early programming languages were designed to save money
(increase programmer supply) by opening the door to less skilled
programmers. A natural way to attempt this is by offering them an
interface similar to what they've already encountered in school. Like
highschool math notation, for instance.

But what happens when we stray from highschool textbook Fibonacci
implementations? Some languages look great for solving Tower of Hanoi,
but not so enviable in other domains. This is why people look towards
"domain specific languages," and Lisp creeps into the conversation.

Now despite all this, Lisp DOES respect other traditions. Observe LOOP,
which is like every for(;;) loop in every language rolled into one.
Dirty and rewarding, the sort of thing which probably makes Haskellers
and MLers scream indignantly. Also, there are infix parsers and whatnot
floating around the net, for when conventional syntax is appropriate.


> > It seems likely to me that languages that require complex parsers are
> > harder for humans to understand as well.
>
> There is unquestionably a huge amount of evidence to the contrary. Most
> natural and programming languages have complicated grammars precisely
> because it simplifies their use and makes them easier to understand.

Would you please point us to evidence in this direction?


Jon Harrop wrote:
> Joe Marshall wrote:
> > Is readability simply a subjective measure, then?  If so, and if
> > maintainability is about how easily a human can parse it, then
> > maintainability is also a subjective measure (and not particularly
> > interesting for comparing computer languages).
>
> Yes. Readability and maintainability are inherently subjective. However,
> they are both very important when comparing computer languages.

Writing honest language vs. language benchmarks is notoriously full of
landmines. Comparing readability sounds like its own Iraq.

Lisp is different, there's no question about that. It may be outflanked
in specific areas; and today's Common Lisp is simply one little
milestone in its evolution. Once, it was believed that an M-expression
syntax would replace the current S-expressions, but many liked sexps.
In the future, and given enough funding, we can wishfully predict human
interface improvements where IDEs project Lisp code in some
conventional way to the user, just as webpages don't look like HTML
markup.

Or perhaps sexps lead the way to a more sensible syntax than the weaker
one which mathematicians developed. Many claim that Newton's calculus
notation is less flexible than Leibniz's, and no doubt various number
systems have varying disadvantages. Perhaps Fermat's margin wouldn't
have been a problem if he had Lisp macros.


Tayssir
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4302926f$0$1306$ed2619ec@ptn-nntp-reader02.plus.net>
Tayssir John Gabbour wrote:
> Jon Harrop wrote:
>> Additionally (pun intended), we were all taught operator precedences in
>> conventional mathematics at a very young age. It seems at best odd and at
>> worst stupid to disregard this.
> 
> I hope there's little disregarding or idiocy going on. Many Lisp users
> use other languages regularly, so it's understandably hard for Lisp's
> unusual syntax to be lost on them.

Yes. This brings me to my next question: what other FPLs do Lispers know?

> Now despite all this, Lisp DOES respect other traditions. Observe LOOP,
> which is like every for(;;) loop in every language rolled into one.
> Dirty and rewarding, the sort of thing which probably makes Haskellers
> and MLers scream indignantly.

Not really. OCaml has (more restricted) for loops and both SML and OCaml can
implement customised for loops.

> Also, there are infix parsers and whatnot 
> floating around the net, for when conventional syntax is appropriate.

I heard that infix parsers are rarely used not because infix is worse but
because infix is unusual in Lisp code and increases incompatibility. Would
you agree with that?

>> There is unquestionably a huge amount of evidence to the contrary. Most
>> natural and programming languages have complicated grammars precisely
>> because it simplifies their use and makes them easier to understand.
> 
> Would you please point us to evidence in this direction?

Firstly, do you agree that languages are evolving to be more concise?
Secondly, do you agree that more concise languages tend to have more
complicated grammars? Finally, what other reason could drive this
association?

I believe that languages are evolving to be more concise and to have more
complicated grammars. I can see no reason for complicating grammars unless
it aids brevity/elegance/comprehensibility. So I see the evolution of
natural and programming languages as a huge amount of evidence that
complicated grammars are used to simplify the use of languages.

IMHO, humans are very good at deciphering expressions written in complicated
grammars, and this is why we make things easier for ourselves by
complicating grammars. In particular, we are better at understanding many
short expressions written in the context of a complicated grammar, rather
than many long expressions written with a very simple grammar.

>> Yes. Readability and maintainability are inherently subjective. However,
>> they are both very important when comparing computer languages.
> 
> Writing honest language vs. language benchmarks is notoriously full of
> landmines. Comparing readability sounds like its own Iraq.

Yes, absolutely.

> Lisp is different, there's no question about that. It may be outflanked
> in specific areas; and today's Common Lisp is simply one little
> milestone in its evolution. Once, it was believed that an M-expression
> syntax would replace the current S-expressions, but many liked sexps.
> In the future, and given enough funding, we can wishfully predict human
> interface improvements where IDEs project Lisp code in some
> conventional way to the user, just as webpages don't look like HTML
> markup.

Several people have now mentioned IDEs and I think that is something that I
have ignored until now and should probably spend some time on...

> Or perhaps sexps lead the way to a more sensible syntax than the weaker
> one which mathematicians developed. Many claim that Newton's calculus
> notation is less flexible than Leibniz's, and no doubt various number
> systems have varying disadvantages. Perhaps Fermat's margin wouldn't
> have been a problem if he had Lisp macros.

:-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Tayssir John Gabbour
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124246583.162032.70770@z14g2000cwz.googlegroups.com>
Jon Harrop wrote:
> Tayssir John Gabbour wrote:
> > Now despite all this, Lisp DOES respect other traditions. Observe LOOP,
> > which is like every for(;;) loop in every language rolled into one.
> > Dirty and rewarding, the sort of thing which probably makes Haskellers
> > and MLers scream indignantly.
>
> Not really. OCaml has (more restricted) for loops and both SML and OCaml can
> implement customised for loops.

If this is true, MLers may be more mature (or some might say twisted)
than many Lisp users, as LOOP is offensive to a percentage of Lisp
users for being "unlispy." Maybe too hedonistic. ;)

I think comparing LOOP to a for-loop is like comparing [insert apt
mountain vs. molehill comparison here]. I like it a lot though.


> > Also, there are infix parsers and whatnot
> > floating around the net, for when conventional syntax is appropriate.
>
> I heard that infix parsers are rarely used not because infix is worse but
> because infix is unusual in Lisp code and increases incompatibility. Would
> you agree with that?

Well, almost every domain-specific language is unusual. But Lisp
invites people to craft such languages when appropriate for readability
and maintenance.

For example, times and dates don't look "lispy" (8:34 at 8-2-2005, or
RFC 2445's 19980118T230000), but I have "read-macros" which allow me to
use this unusual syntax in sourcecode when performing datetime
calculations.

People shouldn't go nuts though. Any given paradigm has a sweet spot
past which it becomes increasingly questionable to push:
http://lisp.tech.coop/lisp-user-meeting-amsterdam-april-2004#macros-and-codewalkers


> >> There is unquestionably a huge amount of evidence to the contrary. Most
> >> natural and programming languages have complicated grammars precisely
> >> because it simplifies their use and makes them easier to understand.
> >
> > Would you please point us to evidence in this direction?
>
> Firstly, do you agree that languages are evolving to be more concise?
> Secondly, do you agree that more concise languages tend to have more
> complicated grammars? Finally, what other reason could drive this
> association?

I feel I'm expected to be in some role debating you (maybe a weird
spidey-sense left over from debate team), but I'm genuinely interested
in the evidence you mentioned. So I'm still curious what evidence there
exists, and don't feel suited to analytical debates.

That said, there are examples of languages designed both towards and
away from syntactic concision.

APL/J/K seem to beautifully use concision. If Alan Kay is to be
believed, one screen or so essentially holds short-term memory, and
concise languages certainly help here. (I hope I'm not butchering his
point.)
http://www.archive.org/details/AlanKeyD1987_2

On the other end of the scale, I recall natural languages are quite
verbose, relative to information content. Java is also quite verbose,
more than I like, but it seems to be very popular...

Lisp is a bit tricky to place on this spectrum, since it contains
uncommon abstraction tools like macros and whatnot. Many people (I
think convincingly) point out that many of Lisp's features don't make
much sense in small codebases; they spread their wings in large ones.
So there are different paths to judging concision.


Tayssir
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4303ed70$0$1310$ed2619ec@ptn-nntp-reader02.plus.net>
Tayssir John Gabbour wrote:
>> Firstly, do you agree that languages are evolving to be more concise?
>> Secondly, do you agree that more concise languages tend to have more
>> complicated grammars? Finally, what other reason could drive this
>> association?
> 
> I feel I'm expected to be in some role debating you (maybe a weird
> spidey-sense left over from debate team), but I'm genuinely interested
> in the evidence you mentioned. So I'm still curious what evidence there
> exists, and don't feel suited to analytical debates.

Go to the shootout page, use CRAPS to sort the results by LOC. I think there
is an association between LOC and age, with languages like OCaml/Python/D
at the top and languages like Fortran/Pascal/Forth at the bottom. This
could be made more objective by plotting the age of each language vs its
LOC score.

> That said, there are examples of languages designed both towards and
> away from syntactic concision.

What languages are designed to be verbose?

> On the other end of the scale, I recall natural languages are quite
> verbose, relative to information content. Java is also quite verbose,
> more than I like, but it seems to be very popular...

Other people have said that but I have to disagree. Someone posted saying
that C++ and Java are verbose. They are not if you compare them to older
languages like Fortran and assembler. If you consider the amount of code
required to implement even fairly trivial problems, C++ and Java programs
are succinct compared to the equivalent Fortran.

My ray tracer, for example, is ~70 LOC in OCaml/SML, ~100-110LOC in
C++/Java/Lisp and ~180LOC in Fortran. Even then, the Fortran isn't
equivalent because it uses a specialised data structure.

> Lisp is a bit tricky to place on this spectrum, since it contains
> uncommon abstraction tools like macros and whatnot.

There are several other languages with equivalent capabilities so Lisp
certainly isn't unique in this respect. Lisp is, however, much older than
the others.

> Many people (I 
> think convincingly) point out that many of Lisp's features don't make
> much sense in small codebases; they spread their wings in large ones.
> So there are different paths to judging concision.

Yes. If my belief that the asymptotic size of programs increases differently
for different types of language is correct, then you are right that there is
no point
in trying to compare the constant prefactor. Nevertheless, it is
interesting to compare program sizes for some real tasks...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Tayssir John Gabbour
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124359640.690272.160870@g43g2000cwa.googlegroups.com>
Jon Harrop wrote:
> Tayssir John Gabbour wrote:
> > That said, there are examples of languages designed both towards and
> > away from syntactic concision.
>
> What languages are designed to be verbose?

* Anything more verbose than APL that was designed by someone vaguely
aware of it. APL's very concise notation was developed in the 1950s,
and published in 1962. To judge verbosity, APL/J/K seems a very good
benchmark.

"These cryptic symbols, some have joked, make it possible to construct
an entire air traffic control system in two lines of code. Indeed, in
some versions of APL, it is theoretically possible to express any
computable function in one expression, that is in one line of code. You
can use the other line for I/O, or constructing a GUI. Because of its
condensed nature and non-standard characters, APL has sometimes been
termed a 'write-only language', and reading an APL program can at first
feel like decoding an alien tongue."
http://en.wikipedia.org/wiki/APL_programming_language

*  Anything with a fairly human-language frontend. Or often one that
emphasizes "readability," such as Python, which seems more verbose than
Perl (though I've used Perl only rarely; still, I did a quick search on
"perl python verbosity" and it appears people find the newer language
more verbose than the older).

* Some languages have a culture geared towards using less abbreviated
names. Common Lisp's operation "multiple-value-bind" is certainly not
an attempt to be concise. In fact, we can run a quality obfuscator on
Java or C# and see an attempt at concision at work.

* Many newer statically-typed languages (like C# and Java) don't use
type inferencing, adding to verbosity.

* Java goes nuts with access control and other boilerplate code. I
think less concise than Common Lisp, particularly since it lacks
built-in macros.

* Anything which compiles to smaller object-code or executable sizes.
;)


(Incidentally, remarkable concision also doesn't necessarily correlate
with coding speed, over different languages. I've observed languages
like Haskell achieved high concision on various algorithmic problems,
much more so than Common Lisp. But when people report the time involved
to code them, it appears the time is wildly divergent from coding in
Lisp. 1 min/line in Lisp appears to be a commonly reported coding rate,
for whatever reason. When some announced a coding productivity shootout
here on usenet [Frank Buss's?], I was very surprised that Haskellers
seemed to mull things over for a long while, then spit out remarkably
concise solutions.)


> > On the other end of the scale, I recall natural languages are quite
> > verbose, relative to information content. Java is also quite verbose,
> > more than I like, but it seems to be very popular...
>
> Other people have said that but I have to disagree. Someone posted saying
> that C++ and Java are verbose. They are not if you compare them to older
> languages like Fortran and assembler. If you consider the amount of code
> required to implement even fairly trivial problems, C++ and Java programs
> are succinct compared to the equivalent Fortran.

Java is verbose if you compare it to APL and Common Lisp, both of which
are older. Maybe by orders of magnitude.

XML-based languages are generally remarkably more verbose than older
sexp-based ones like Common Lisp.


> >>>> There is unquestionably a huge amount of evidence to the contrary. Most
> >>>> natural and programming languages have complicated grammars precisely
> >>>> because it simplifies their use and makes them easier to understand.
> >>>
> >>> Would you please point us to evidence in this direction?
> >>
> >> [...]
> >
> > I feel I'm expected to be in some role debating you (maybe a weird
> > spidey-sense left over from debate team), but I'm genuinely interested
> > in the evidence you mentioned. So I'm still curious what evidence there
> > exists, and don't feel suited to analytical debates.
>
> Go to the shootout page, use CRAPS to sort the results by LOC. I think there
> is an association between LOC and age, with languages like OCaml/Python/D
> at the top and languages like Fortran/Pascal/Forth at the bottom. This
> could be made more objective by plotting the age of each language vs its
> LOC score.

Are you referring to a page like this shootout linked to from your
ffconsultancy.com page?
http://shootout.alioth.debian.org/sandbox/benchmark.php?test=wc&lang=all&sort=lines

Incidentally, if you have any (perhaps published) papers in mind
defending this thesis, I'd be grateful.


Tayssir
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4304d5a3$0$1298$ed2619ec@ptn-nntp-reader02.plus.net>
Tayssir John Gabbour wrote:
> Java is verbose if you compare it to APL and Common Lisp, both of which
> are older. Maybe by orders of magnitude.

But you're picking pairs of outliers. Compare Java to Fortran and OCaml to
Lisp. Better still, just plot date of conception vs some measure of
verbosity.

>> Go to the shootout page, use CRAPS to sort the results by LOC. I think
>> there is an association between LOC and age, with languages like
>> OCaml/Python/D at the top and languages like Fortran/Pascal/Forth at the
>> bottom. This could be made more objective by plotting the age of each
>> language vs its LOC score.
> 
> Are you referring to a page like this shootout linked to from your
> ffconsultancy.com page?
>
http://shootout.alioth.debian.org/sandbox/benchmark.php?test=wc&lang=all&sort=lines

The Computer Language Shootout, yes. Use CRAPs to average over all results.

> Incidentally, if you have any (perhaps published) papers in mind
> defending this thesis, I'd be grateful.

What thesis?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3br3v9gqf.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:


OK, I know you claimed "huge amounts" of evidence for your other
claims concerning verbosity, conciseness, high expressivity, and
complex vs simple syntax.  But so far you haven't actually offered
any.  I'm not asking for "huge amounts", I'm asking you for _any_
evidence.  Do you have any references to studies supporting your
claims?  I mean, if there are "huge amounts" you would think that
somebody somewhere would have done a study or two and you'd have at
least that reference in your back pocket to trot out in situations
like this.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4304fe3d$0$22916$ed2619ec@ptn-nntp-reader01.plus.net>
jayessay wrote:
> OK, I know you claimed "huge amounts" of evidence for your other
> claims concerning verbosity, conciseness, high expressivity, and
> complex vs simple syntax.  But so far you haven't actually offered
> any.  I'm not asking for "huge amounts", I'm asking you for _any_
> evidence.

What's wrong with my last post? or my post about the ray tracer?

> Do you have any references to studies supporting your 
> claims?

No.

> I mean, if there are "huge amounts" you would think that 
> somebody somewhere would have done a study or two and you'd have at
> least that reference in your back pocket to trot out in situations
> like this.

From my point of view, there is so much evidence that you just have to look
at any repository of comparable code. Have you looked at the shootout or my
ray tracer?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3y86y7wno.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> jayessay wrote:
> > OK, I know you claimed "huge amounts" of evidence for your other
> > claims concerning verbosity, conciseness, high expressivity, and
> > complex vs simple syntax.  But so far you haven't actually offered
> > any.  I'm not asking for "huge amounts", I'm asking you for _any_
> > evidence.
> 
> What's wrong with my last post? or my post about the ray tracer?
> 
> > Do you have any references to studies supporting your 
> > claims?
> 
> No.

OK.  That's fine.  I don't consider the stuff you have been stating as
evidence, but rather opinion based on some intuition.  As I stated
elsewhere, the sort of counter intuitive (to me at least) claims you
are making about complex syntax being a good thing (or at least a
necessary thing for high expressivity) need some real evidence.  As in
studies based on controlled experiments as done by, for example,
cognitive scientists.  That would lend at least some credence to what
you are saying.


> From my point of view, there is so much evidence that you just have to look

That's the problem.  It's just from your "point of view", i.e., some
opinion which could be totally wrong.  And from my point of view
indeed looks to be totally wrong.

> at any repository of comparable code. Have you looked at the shootout or my
> ray tracer?

Yes.  And many other such things.  Basically irrelevant.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4305ff9b$0$17470$ed2e19e4@ptn-nntp-reader04.plus.net>
jayessay wrote:
>> at any repository of comparable code. Have you looked at the shootout or
>> my ray tracer?
> 
> Yes.  And many other such things.  Basically irrelevant.

Can you elaborate? Why is that objective evidence irrelevant?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3ll2x92yg.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> jayessay wrote:
> >> at any repository of comparable code. Have you looked at the shootout or
> >> my ray tracer?
> > 
> > Yes.  And many other such things.  Basically irrelevant.
> 
> Can you elaborate? Why is that objective evidence irrelevant?


a) Because you incorrectly think it is _objective_ when it isn't.

b) It in no way is controlled, so could mean anything, even on some
   "objective" basis

c) It in no way tests, shows, or in any other sense indicates anything
   about the cognitive impedance match or mismatch for people reading
   it.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43063024$0$22943$ed2619ec@ptn-nntp-reader01.plus.net>
jayessay wrote:
> a) Because you incorrectly think it is _objective_ when it isn't.

Just plot age vs LOC for the code on the shootout. You can't get much more
objective than that.

> b) It in no way is controlled, so could mean anything, even on some
>    "objective" basis

A controlled experiment is intractable in this case. Indeed, we're trying to
measure something inherently subjective (verbosity).

> c) It in no way tests, shows, or in any other sense indicates anything
>    about the cognitive impedance match or mismatch for people reading
>    it.

"cognitive impedance match"?!

Even if you want to disregard this evidence, the best conclusion that you
can come to is that you don't know because you don't have any evidence
unless you have evidence to the contrary. Do you?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m38xyx8war.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> jayessay wrote:
> > a) Because you incorrectly think it is _objective_ when it isn't.
> 
> Just plot age vs LOC for the code on the shootout. You can't get much more
> objective than that.

But that's irrelevant because the _input_ is suspect.


> > b) It in no way is controlled, so could mean anything, even on some
> >    "objective" basis
> 
> A controlled experiment is intractable in this case.

That may well be true, but you don't know this for a fact.  There has
been work in the general area and specifically as relates to
programming.  The PPIG http://www.ppig.org/ folks were one group
interested in this stuff.  But I don't really follow it too much
anymore so don't really know what's new.

> Indeed, we're trying to measure something inherently subjective
> (verbosity).

If that is the case, then as you indicate, there is no objective
evidence and you've been wasting your (and everyone else's) time.

OTOH, I'm not convinced that "verbosity" is all there is to the
general point: simple expression for the semantics of complex
situations (or something like that).


> > c) It in no way tests, shows, or in any other sense indicates anything
> >    about the cognitive impedance match or mismatch for people reading
> >    it.
> 
> "cognitive impedance match"?!

Right.


> Even if you want to disregard this evidence, the best conclusion
> that you can come to is that you don't know because you don't have
> any evidence

Now, we are getting somewhere.  I think if you are really interested
in this and really think it is a _very big deal_ (which it may well
be), then you need to seek out some cognitive scientists (as a first
approximation of who to find) and try explaining your ideas to them.
PPIG may be a good place to start.  Next see if you can get them
interested enough to design some real experiments in order to get some
real evidence.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430665d9$0$17478$ed2e19e4@ptn-nntp-reader04.plus.net>
jayessay wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> jayessay wrote:
>> > a) Because you incorrectly think it is _objective_ when it isn't.
>> 
>> Just plot age vs LOC for the code on the shootout. You can't get much
>> more objective than that.
> 
> But that's irrelevant because the _input_ is suspect.

Why do you think that?

>> Indeed, we're trying to measure something inherently subjective
>> (verbosity).
> 
> If that is the case, then as you indicate, there is no objective
> evidence and you've been wasting your (and everyone elses) time.

On the basis of that argument, nothing is objective.

>> Even if you want to disregard this evidence, the best conclusion
>> that you can come to is that you don't know because you don't have
>> any evidence
> 
> Now, we are getting somewhere.  I think if you are really interested
> in this and really think it is a _very big deal_ (which it may well
> be), then you need to seek out some cognitive scientists (as a first 
> approximation of who to find) and try explaining your ideas to them.
> PPIG may be a good place to start.  Next see if you can get them
> interested enough to design some real experiments in order to get some
> real evidence.

Like computer science and creation science, cognitive science isn't a
science. Frankly, I'd rather taxes weren't wasted on it...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m34q9k8se9.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> >> Just plot age vs LOC for the code on the shootout. You can't get much
> >> more objective than that.
> > 
> > But that's irrelevant because the _input_ is suspect.
> 
> Why do you think that?

You can't be serious.


> >> Indeed, we're trying to measure something inherently subjective
> >> (verbosity).
> > 
> > If that is the case, then as you indicate, there is no objective
> > evidence and you've been wasting your (and everyone else's) time.
> 
> On the basis of that argument, nothing is objective.

What are you talking about?  First, there isn't even an argument there
(not even a single deduction for chrissake).  Second the topic only
concerns that which is "inherently subjective".  How do you get a
universal out of a single point?  Really, your reasoning skills are
highly suspect.


> >> Even if you want to disregard this evidence, the best conclusion
> >> that you can come to is that you don't know because you don't have
> >> any evidence
> > 
> > Now, we are getting somewhere.  I think if you are really interested
> > in this and really think it is a _very big deal_ (which it may well
> > be), then you need to seek out some cognitive scientists (as a first 
> > approximation of who to find) and try explaining your ideas to them.
> > PPIG may be a good place to start.  Next see if you can get them
> > interested enough to design some real experiments in order to get some
> > real evidence.
> 
> Like computer science and creation science, cognitive science isn't a
> science. Frankly, I'd rather taxes weren't wasted on it...

Then why are you pretending to play at it?  Actually, playing at it
is even worse, so are you saying people shouldn't waste any money on
you or any of your efforts???  Actually, that's beginning to sound
like a brilliant idea.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: drewc
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <UMoNe.266538$s54.149356@pd7tw2no>
Jon Harrop wrote:
> jayessay wrote:
> 
>>>at any repository of comparable code. Have you looked at the shootout or
>>>my ray tracer?
>>
>>Yes.  And many other such things.  Basically irrelevant.
> 
> 
> Can you elaborate? Why is that objective evidence irrelevant?

"You keep using that word. I do not think it means what you think it means"

ob·jec·tive (əb-jĕk'tĭv) pronunciation
adj.

    1. Of or having to do with a material object.
    2. Having actual existence or reality.
    3.
          a. Uninfluenced by emotions or personal prejudices: an 
objective critic. See synonyms at fair.
          b. Based on observable phenomena; presented factually: an 
objective appraisal.

Your full quote (which was cut off above) was :

"From my point of view, there is so much evidence that you just have to 
look at any repository of comparable code."

The keywords here being "From my point of view". The "evidence" you have 
offered to support your claims is _subjective_ at best, and nonsense at 
the other end of the spectrum.

You seem to be focused on syntax, of all things. Syntax does not a 
language make. You have offered opinion as fact, and that has upset 
people. If you want to make a subjective comparison, that's your 
prerogative, but it is offensive to opine and claim factualness.

If, instead of arguing on c.l.l, you had taken the time to learn some 
lisp, you might be of a different opinion, or at least have some 
concrete complaints instead of vague handwaving.

I'm glad that you are looking to evaluate lisp, but evaluate lisp on its 
own terms. For almost 50 years people have been using the s-exp syntax.. 
experienced lisp programmers not only don't see it as a problem, but 
find it easier and more readable than a syntax-y language... doesn't 
that tell you something?

drewc

-- 
Drew Crampsie
drewc at tech dot coop
"Never mind the bollocks -- here's the sexp's tools."
	-- Karl A. Krueger on comp.lang.lisp
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124367908.841702.278120@g43g2000cwa.googlegroups.com>
Jon Harrop wrote:
> Tayssir John Gabbour wrote:
> >> Firstly, do you agree that languages are evolving to be more concise?
> >> Secondly, do you agree that more concise languages tend to have more
> >> complicated grammars? Finally, what other reason could drive this
> >> association?
> >
> > I feel I'm expected to be in some role debating you (maybe a weird
> > spidey-sense left over from debate team), but I'm genuinely interested
> > in the evidence you mentioned. So I'm still curious what evidence there
> > exists, and don't feel suited to analytical debates.
>
> Go to the shootout page, use CRAPS to sort the results by LOC. I think there
> is an association between LOC and age, with languages like OCaml/Python/D
> at the top and languages like Fortran/Pascal/Forth at the bottom. This
> could be made more objective by plotting the age of each language vs its
> LOC score.

I think it's also got quite a lot to do with the size of the machine
the programmer was writing the language for, and the tasks the authors
were taking on.  For instance, in the old days you had to worry about
implementing the compiler on the machine.

Chuck Moore wrote Forth as a way to provide small computers of the 60s
with a high-level language.  Part of the idea was that it would be
simple to reimplement for any particular machine.

C also was written for a mini-computer.  Its authors knew more
powerful languages, but they had to fit what they wrote into the 8K RAM
of their PDP-11.
The original C compiler just managed this, by using 4 separate
executables.

Fortran and Cobol were written for the mainframes of the 50's.  They
were written for applications where run-time was very important.

Lisp was written for large mainframe computers over a long period from
the 50s until the 80s.  The authors weren't so interested in runtime as
commercial users.  They were researching programming and AI, so they
were throwing away programs as often as they were refining them.  And
they would use assembly when they needed speed.

Languages developed in the environments they found themselves in.

It's worth mentioning also that your program would be much longer in
earlier versions of Lisp.
From: Björn Lindberg
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43043e18$1@news.cadence.com>
Jon Harrop wrote:

>>Lisp is a bit tricky to place on this spectrum, since it contains
>>uncommon abstraction tools like macros and whatnot.
> 
> 
> There are several other languages with equivalent capabilities so Lisp
> certainly isn't unique in this respect. Lisp is, however, much older than
> the others.

Can you give an example of another language with Lispy macros?


Björn
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430468dd$0$1314$ed2619ec@ptn-nntp-reader02.plus.net>
Björn Lindberg wrote:
> Can you give an example of another language with Lispy macros?

OCaml has camlp4. Many other languages (like Mathematica) have equivalent
capabilities but they aren't called "macros".

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3slx79vt8.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Björn Lindberg wrote:
> > Can you give an example of another language with Lispy macros?
> 
> OCaml has camlp4.

Those don't have the same capability.

> Many other languages (like Mathematica) have equivalent
> capabilities but they aren't called "macros".

I don't think you understand what capabilities Lisp macros have


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43049b0b$0$22950$ed2619ec@ptn-nntp-reader01.plus.net>
jayessay wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> Björn Lindberg wrote:
>> > Can you give an example of another language with Lispy macros?
>> 
>> OCaml has camlp4.
> 
> Those don't have the same capability.

Yes.

>> Many other languages (like Mathematica) have equivalent
>> capabilities but they aren't called "macros".
> 
> I don't think you understand what capabilities Lisp macros have

They manipulate ASTs, AFAIK.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3oe7v9sie.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> jayessay wrote:
> > Jon Harrop <······@jdh30.plus.com> writes:
> >> Björn Lindberg wrote:
> >> > Can you give an example of another language with Lispy macros?
> >> 
> >> OCaml has camlp4.
> > 
> > Those don't have the same capability.
> 
> Yes.

Yes, you know they don't or yes, you think they do?


> >> Many other languages (like Mathematica) have equivalent
> >> capabilities but they aren't called "macros".
> > 
> > I don't think you understand what capabilities Lisp macros have
> 
> They manipulate ASTs, AFAIK.

That's too specific.  They take an environment and sexpr (typically
some form of "code"), perform transformations (and possibly any other
sort of processing) and generate a transformed sexpr (typically,
"code").  The input need not be an "AST".  Also, "Concrete" Lisp code
(as previously pointed out) is in tree form - you don't need to parse
it first into some AST.  And, macros can make use of any function
(including other macros) available to you - either from the base
language, 3rd party libs, or your own.  So, a macro is really just
another Lisp program but which runs at a different time.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4304c09e$0$17498$ed2e19e4@ptn-nntp-reader04.plus.net>
jayessay wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> jayessay wrote:
>> > Jon Harrop <······@jdh30.plus.com> writes:
>> >> Björn Lindberg wrote:
>> >> > Can you give an example of another language with Lispy macros?
>> >> 
>> >> OCaml has camlp4.
>> > 
>> > Those don't have the same capability.
>> 
>> Yes.
> 
> Yes, you know they don't or yes, you think they do?

Yes, camlp4 macros are not quite the same as Lisp's macros. However, I think
they're similar enough to count as being "Lispy macros".

>> >> Many other languages (like Mathematica) have equivalent
>> >> capabilities but they aren't called "macros".
>> > 
>> > I don't think you understand what capabilities Lisp macros have
>> 
>> They manipulate ASTs, AFAIK.
> 
> That's too specific.  They take an environment and sexpr (typically
> some form of "code"), perform transformations (and possibly any other
> sort of processing)

What sorts of processing are not transformations?

> and generate a transformed sexpr (typically, 
> "code").  The input need not be an "AST".

What else could the input be?

> Also, "Concrete" Lisp code 
> (as previously pointed out) is in tree form - you don't need to parse
> it first into some AST.  And, macros can make use of any function
> (including other macros) available to you - either from the base
> language, 3rd party libs, or your own.  So, a macro is really just
> another Lisp program but which runs at a different time.

Yes, that is the same as Mathematica code.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3k6ij9i3f.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> jayessay wrote:
> > Jon Harrop <······@jdh30.plus.com> writes:
> >> jayessay wrote:
> >> > Jon Harrop <······@jdh30.plus.com> writes:
> >> >> Björn Lindberg wrote:
> >> >> > Can you give an example of another language with Lispy macros?
> >> >> 
> >> >> OCaml has camlp4.
> >> > 
> >> > Those don't have the same capability.
> >> 
> >> Yes.
> > 
> > Yes, you know they don't or yes, you think they do?
> 
> Yes, camlp4 macros are not quite the same as Lisp's macros. However, I think
> they're similar enough to count as being "Lispy macros".

I disagree.  Frankly they look very "outside" the language and
requiring extra operators and many restrictions and targeting only
"syntax extension".  A somewhat better claim could be made that they
are somewhat "Schemy", as in define-syntax.


> >> >> Many other languages (like Mathematica) have equivalent
> >> >> capabilities but they aren't called "macros".
> >> > 
> >> > I don't think you understand what capabilities Lisp macros have
> >> 
> >> They manipulate ASTs, AFAIK.
> > 
> > That's too specific.  They take an environment and sexpr (typically
> > some form of "code"), perform transformations (and possibly any other
> > sort of processing)
> 
> What sorts of processing are not transformations?

Pretty much anything.  A macro could call out via http over the net to
get some information about some web site somewhere in making a
determination as to whether or not it should print "Hello Marion, is
that you?" as part of its side effecting output.


> > and generate a transformed sexpr (typically, 
> > "code").  The input need not be an "AST".
> 
> What else could the input be?

Anything that can be represented by an sexpr, which is pretty much
anything.  Obvious things are tables, structures, arbitrary strings,
et.al.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Peter Seibel
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m2zmrfumrd.fsf@gigamonkeys.com>
Jon Harrop <······@jdh30.plus.com> writes:

> jayessay wrote:
>> Jon Harrop <······@jdh30.plus.com> writes:
>>> jayessay wrote:

>>>> I don't think you understand what capabilities Lisp macros have
>>> 
>>> They manipulate ASTs, AFAIK.

>> That's too specific.  They take an environment and sexpr (typically
>> some form of "code"), perform transformations (and possibly any
>> other sort of processing) and generate a transformed sexpr
>> (typically, "code").  The input need not be an "AST".
>
> What else could the input be?

Anything. E.g. a string containing the text of some other language to be
parsed and compiled:

  ;; Macro whose argument is a string of postfix ("reverse Polish")
  ;; arithmetic; the string is tokenized and parsed at macroexpansion
  ;; time into an ordinary Lisp prefix expression, so the compiled
  ;; caller pays no runtime parsing cost.
  (defmacro postfix-math (string)
    (parse-postfix (tokenize string)))

  ;; Split STRING into a list of Lisp tokens by calling READ on a
  ;; string stream; the stream object itself is used as the EOF
  ;; sentinel so it can never collide with a real token.
  (defun tokenize (string)
    (with-input-from-string (in string) 
      (loop for token = (read in nil in)
         until (eql token in) collect token)))

  ;; Convert TOKENS (in postfix order) into a prefix expression, using
  ;; STACK as the operand stack.  Returns the single expression left
  ;; on the stack when the tokens are exhausted.
  ;;
  ;; Operand order matters for - and /: in "a b -" the top of the
  ;; stack (b) is the RIGHT operand, so we must pop it first and emit
  ;; (- a b).  The original (list token (pop stack) (pop stack))
  ;; reversed the operands for non-commutative operators.
  (defun parse-postfix (tokens &optional stack)
    (if tokens
        (let ((token (pop tokens)))
          (case token
            ((+ - * /)
             ;; Pop right operand first (it was pushed last), then left.
             (let* ((right (pop stack))
                    (left (pop stack)))
               (parse-postfix tokens (cons (list token left right) stack))))
            (t (parse-postfix tokens (cons token stack)))))
        (first stack)))

  CL-USER> (let ((a 10) (b 20)) (postfix-math "a b +"))
  30
  CL-USER> (let ((a 10) (b 20)) (postfix-math "100 a b + *"))
  3000

-Peter

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4304d578$0$1298$ed2619ec@ptn-nntp-reader02.plus.net>
Peter Seibel wrote:
> Anything. E.g. a string containing the text of some other language to be
> parsed and compiled:
> ...

How is that different from calling a lexer and parser in any other language?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3fyt79hp4.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Peter Seibel wrote:
> > Anything. E.g. a string contain the text of some other language to be
> > parsed and compiled:
> > ...
> 
> How is that different from calling a lexer and parser in any other language?

It's easier to explain how it is _similar_ to that, since the
differences are everywhere.  Sure, a macro (with all attendant
supporting functions and what not) could implement an OCaml compiler
with the usual components of lexer, parser, semantic analyzer, IL
generator, optimizer and code generator.

Of course this would be crazy, but here's a _big_ difference that
hints at why this is so different from your "calling [out to] a lexer
and parser".  You could then freely intermix OCaml and Lisp.  Use
OCaml for its more restricted domain of use and drop into CL when you
need the extra power and then back again, etc.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4307f6ae$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
jayessay wrote:
> It's easier to explain how it is _similar_ to that, since the
> differences are everywhere.  Sure, a macro (with all attendant
> supporting functions and what not) could implement an OCaml compiler
> with the usual components of lexer, parser, semantic analyzer, IL
> generator, optimizer and code generator.
> 
> Of course this would be crazy, but here's a _big_ difference that
> hints at why this is so different from your "calling [out to] a lexer
> and parser".  You could then freely intermix OCaml and Lisp.  Use
> OCaml for its more restricted domain of use and drop into CL when you
> need the extra power and then back again, etc.

The problem with recreating OCaml as a DSL within Lisp is that the
pseudo-OCaml code would not benefit from the real OCaml's runtime.

However, this raises the interesting question of whether the converse is
possible - can you write a Lisp DSL within OCaml?

I think you'd need an evaluator and a camlp4 macro to interpret quoted Lisp
code. If it worked then the interface would be nasty because the Lisp code
would produce ASTs and you'd have to dynamically type check whilst
extracting the contents.

Still, could be an interesting project...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4308a109$0$97102$ed2619ec@ptn-nntp-reader03.plus.net>
Jon Harrop wrote:
> The problem with recreating OCaml as a DSL within Lisp is that the
> pseudo-OCaml code would not benefit from the real OCaml's runtime.
> 
> However, this raises the interesting question of whether the converse is
> possible - can you write a Lisp DSL within OCaml?

Thinking about this, I'm sure it can be done quite easily but you'd end up
with the opposite problem - the OCaml would be fast but the embedded Lisp
would be slow. The only way to get the benefits of both languages would be
to write veneers and use full implementations of both languages.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Julian Squires
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <FB1Oe.77937$Ph4.2447873@ursa-nb00s0.nbnet.nb.ca>
On 2005-08-21, Jon Harrop <······@jdh30.plus.com> wrote:
> Jon Harrop wrote:
>> The problem with recreating OCaml as a DSL within Lisp is that the
>> pseudo-OCaml code would not benefit from the real OCaml's runtime.
>> 
>> However, this raises the interesting question of whether the converse is
>> possible - can you write a Lisp DSL within OCaml?
>
> Thinking about this, I'm sure it can be done quite easily but you'd end up
> with the opposite problem - the OCaml would be fast but the embedded Lisp
> would be slow. The only way to get the benefits of both languages would be
> to write veneers and use full implementations of both languages.

It should be possible (though a decent bit of work... mostly just
parsing and the type system) to implement OCaml within Lisp, and have it
run just as fast.  However, it would still take up lots of space, just
because OCaml doesn't try to do a vast set of things that CL promises.

In fact, doing this would be a fun demonstration of the major
differences between ML and CL, and why OCaml initially appears much
faster, smaller, et cetera.

A CL within OCaml would be a lot more work, AFAICS, because there are
many aspects of OCaml you can't leverage to support CL (unlike the
opposite process).  However, you'd get out of it whatever you put into
it -- I'm sure it would be possible to make a really zippy CL compiler
in OCaml with a really nice FFI bridge between the two.

Cheers.

-- 
Julian Squires
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4308ace3$0$22951$ed2619ec@ptn-nntp-reader01.plus.net>
Julian Squires wrote:
> It should be possible (though a decent bit of work... mostly just
> parsing and the type system) to implement OCaml within Lisp,

SML's type system would be more feasible but OCaml has polymorphic variants
and objects to contend with as well. You've also got all of the
higher-level optimisations, e.g. in the pattern matcher, to implement. Oh,
and the module language. You'd be talking about >200kLOC, I'd guess.

> and have it run just as fast.

If you mean the embedded OCaml would be just as fast as real OCaml then I
can't see how, unless you include all of the real OCaml (run-time and
native-code generation).

> In fact, doing this would be a fun demonstration of the major
> differences between ML and CL, and why OCaml initially appears much
> faster, smaller, et cetera.

The problem is that ML trades macros and dynamic typing for a faster
run-time. With the converse you can easily implement a dynamically typed
language within OCaml at little cost. Macros would be trickier to implement
efficiently. But you can't efficiently implement OCaml's static typing in
Lisp without lots of assurances about how your Lisp implementation will
optimise it. Even then, there are likely to be optimisations that cannot be
expressed to the Lisp compiler (immutability?).

> A CL within OCaml would be a lot more work, AFAICS, because there are
> many aspects of OCaml you can't leverage to support CL (unlike the
> opposite process).

Interesting. That seems to be the opposite view. :-)

> However, you'd get out of it whatever you put into 
> it -- I'm sure it would be possible to make a really zippy CL compiler
> in OCaml with a really nice FFI bridge between the two.

Yes. That would also be a lot of work though. At the very least (performance
wise) you'd want to apply all of the usual Lisp optimisations.

I think we can safely say that slow subsets of either language could be
implemented without too much difficulty in the other language but making
full and efficient implementations would be a huge undertaking. I'm sure it
would be easier to use a FFI.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Julian Squires
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <Yg2Oe.77957$Ph4.2448366@ursa-nb00s0.nbnet.nb.ca>
On 2005-08-21, Jon Harrop <······@jdh30.plus.com> wrote:
> Julian Squires wrote:
>> It should be possible (though a decent bit of work... mostly just
>> parsing and the type system) to implement OCaml within Lisp,
>
> SML's type system would be more feasible but OCaml has polymorphic variants
> and objects to contend with as well. You've also got all of the
> higher-level optimisations, e.g. in the pattern matcher, to implement. Oh,
> and the module language. You'd be talking about >200kLOC, I'd guess.

No, I suspect it would be about 20-30kloc or so, most of that being the
type system.  Have you ever looked at the OCaml compiler?  It's really
pretty simple, as compilers go, or it was the last time I checked.

Of course, one would make use of all the existing Lisp compiler
features.  It wouldn't hurt to tie oneself to a specific compiler, like
SBCL, too, so that one could make more predictable decisions about how
things get optimized.

>> and have it run just as fast.
>
> If you mean the embedded OCaml would be just as fast as real OCaml then I
> can't see how, unless you include all of the real OCaml (run-time and
> native-code generation).

You'd of course use the lisp compiler's code generation and runtime.  I
don't see how OCaml's runtime is any different from the average CL
runtime, once you ensure that your generated code doesn't trigger any CL
features that aren't in ML.

The ML-on-CL compiler would be emitting CL code with lots of type
information and declarations, which should compile quite efficiently.
AFAIR, Python (CMUCL/SBCL's compiler) actually does more code-generation
optimization than OCaml's compiler, which relies a lot on the
higher-level optimizations (particularly resulting from type inference)
to produce fast code.

> I think we can safely say that slow subsets of either language could be
> implemented without too much difficulty in the other language but making
> full and efficient implementations would be a huge undertaking. I'm sure it
> would be easier to use a FFI.

Hmm, I disagree, but I guess I don't care enough to try to prove it one
way or the other.  Though I think I might poke around at a simplified
version of that idea (maybe using Qi or something as a starting point)
and compare disassembled code.

Cheers.

-- 
Julian Squires
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mtr6rF17u8qbU1@individual.net>
Jon Harrop wrote:
> The problem is that ML trades macros and dynamic typing for a faster
> run-time. With the converse you can easily implement a dynamically typed

No, macros are purely syntactic.  They don't result in slowdown, but 
rather speed things up (because they can package up efficient code that 
would otherwise have to use higher-order functions etc.; compare dolist 
to mapcar).  ML trades macros for a more "normal" syntax (and dynamic 
typing/dynamism for efficiency).

-- 
I believe in Karma.  That means I can do bad things to people
all day long and I assume they deserve it.
	Dogbert
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4309d4bb$0$97131$ed2619ec@ptn-nntp-reader03.plus.net>
Ulrich Hobelmann wrote:
> Jon Harrop wrote:
>> The problem is that ML trades macros and dynamic typing for a faster
>> run-time. With the converse you can easily implement a dynamically typed
> 
> No, macros are purely syntactic.

No. If they are as powerful as Lisp's then they interfere with the type
system.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3k6ie6kf4.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Ulrich Hobelmann wrote:
> > Jon Harrop wrote:
> >> The problem is that ML trades macros and dynamic typing for a faster
> >> run-time. With the converse you can easily implement a dynamically typed
> > 
> > No, macros are purely syntactic.
> 
> No. If they are as powerful as Lisp's then they interfere with the type
> system.

Totally wrong.  In fact, macros would be a fundamental mechanism in
how you would go about getting the type inferencing with full
compilation to native code.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mu5g9F187gk0U2@individual.net>
Jon Harrop wrote:
> Ulrich Hobelmann wrote:
>> Jon Harrop wrote:
>>> The problem is that ML trades macros and dynamic typing for a faster
>>> run-time. With the converse you can easily implement a dynamically typed
>> No, macros are purely syntactic.
> 
> No. If they are as powerful as Lisp's then they interfere with the type
> system.

Well, it would be hard to create ML expressions, since they are more 
than s-exps.  But assuming that the language could manipulate ML syntax 
objects, there's no problem.

After all, the code generated by the macro should be correct.  In a 
statically typed language there's no reason why the generated code 
shouldn't also be correctly typed.  yacc generated correctly typed C 
code.  Why shouldn't a macro be able to do that just as well?

-- 
I believe in Karma.  That means I can do bad things to people
all day long and I assume they deserve it.
	Dogbert
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3fyt26jrt.fsf@rigel.goldenthreadtech.com>
Ulrich Hobelmann <···········@web.de> writes:

> Jon Harrop wrote:
> > Ulrich Hobelmann wrote:
> >> Jon Harrop wrote:
> >>> The problem is that ML trades macros and dynamic typing for a faster
> >>> run-time. With the converse you can easily implement a dynamically typed
> >> No, macros are purely syntactic.
> > No. If they are as powerful as Lisp's then they interfere with the
> > type
> > system.
> 
> Well, it would be hard to create ML expressions, since they are more
> than s-exps.  But assuming that the language could manipulate ML
> syntax objects, there's no problem.

The way you would likely have to hack this is to have a full blown
lexer and parser on strings which would then output sexprs as a first
level (i.e., it would output the AST for the stuff).  The strings are
just ML/OCaml/line-noise-r-us programs.  But, again, this would be
"just" another macro and so you could end up more or less effectively
able to mix the two (ML/OCaml/line-noise-r-us and CL).  As I say, it's
probably crazy because the ML/OCaml stuff doesn't really buy you
anything that isn't pretty directly in CL anyway (I'm talking
semantics not "complex syntax" or whatever JH seems to like).  The
Prolog (logic programming) model _does_ buy you something and so it
_does_ make sense to build, and several have been built.


> code.  Why shouldn't a macro be able to do that just as well?

If it happens it must be possible.  This type of stuff has been done.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4309eee8$0$97131$ed2619ec@ptn-nntp-reader03.plus.net>
Ulrich Hobelmann wrote:
> Well, it would be hard to create ML expressions, since they are more
> than s-exps.

How do you mean?

> But assuming that the language could manipulate ML syntax 
> objects, there's no problem.

Yes. It is possible to write Lisp code so that embedded ML code can be
compiled to Lisp code. However, it would be very difficult (IMHO,
impossible with current technology) to make that Lisp code compile to
native code that is as efficient as if the ML had been compiled directly
(e.g. by MLton or ocamlopt). Consider that none of the hand-optimised Lisp
implementations of my ray tracer achieve the performance of ML.

So Lisp is fine for embedding DSLs where performance is not so important but
I think it is a triumph of hope over reality to expect embedded ML to be as
fast as ML code compiled with a real ML compiler.

> After all, the code generated by the macro should be correct.  In a
> statically typed language there's no reason why the generated code
> shouldn't also be correctly typed.  yacc generated correctly typed C
> code.  Why shouldn't a macro be able to do that just as well?

I'm no expert but my understanding is that the static type system will not
necessarily terminate in the presence of Lisp's macros. This obviates the
possibility of completely statically typing all Lisp programs. I believe
the alternative is to try to apply static typing as much as possible. This
clearly works quite well but the performance results speak for themselves.

The proof that I am thinking of is quite an old result in computer science.
There is probably more work on this by now. If you like I can ask around
for more information on this - I studied it almost a decade ago...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3br3q6ij8.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Yes. It is possible to write Lisp code so that embedded ML code can be
> compiled to Lisp code. However, it would be very difficult (IMHO,
> impossible with current technology) to make that Lisp code compile to
> native code that is as efficient as if the ML had been compiled directly
> (e.g. by MLton or ocamlopt)

Why?  That might be true but I see no obvious reason to believe this.


>. Consider that none of the hand-optimised Lisp
> implementations of my ray tracer achieve the performance of ML.

Assuming this is true (I don't know that it is as there may be people
out there that have built such and you don't even know it), as a
scientist you should know that it is irrelevant.


> So Lisp is fine for embedding DSLs where performance is not so important

No, you're wrong here as well.  Performance is actually one of the
reasons why a DSL can be so potent.  It can take advantage of the
special characteristics of the domain to create code that can be _far_
faster than what you can build with a general purpose language.  The
reason is the compiler for the DSL can make optimizations which can't
be made otherwise because the general purpose language doesn't have
the information available to allow it to prove that the optimization
will result in correct code.  So, it doesn't get done.


> but I think it is a triumph of hope over reality to expect embedded
> ML to be as fast as ML code compiled with a real ML compiler.

What reality?  Yours?  It's clear you don't actually know much about
this sort of stuff, so why would you think your belief or intuition
here is of any value?


> > After all, the code generated by the macro should be correct.  In a
> > statically typed language there's no reason why the generated code
> > shouldn't also be correctly typed.  yacc generated correctly typed C
> > code.  Why shouldn't a macro be able to do that just as well?
> 
> I'm no expert

Right.


> but my understanding it that the static type system will not
> necessarily terminate in the presence of Lisp's macros.

You're lost.


> This obviates the possibility of completely statically typing all
> Lisp programs.

True, but only by accident - your reasoning is completely wrong.  Also,
in this context (embedded ML/OCaml) you aren't dealing with "all lisp
programs", so the statement is irrelevant.


> The proof that I am thinking of is quite an old result in computer science.
> There is probably more work on this by now. If you like I can ask around
> for more information on this - I studied it almost a decade ago...

But apparently it didn't sink in...


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4309ffa8$0$97131$ed2619ec@ptn-nntp-reader03.plus.net>
jayessay wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
> 
>> Yes. It is possible to write Lisp code so that embedded ML code can be
>> compiled to Lisp code. However, it would be very difficult (IMHO,
>> impossible with current technology) to make that Lisp code compile to
>> native code that is as efficient as if the ML had been compiled directly
>> (e.g. by MLton or ocamlopt)
> 
> Why?  That might be true but I see no obvious reason to believe this.

I'm not sure that you can convey all of the type information to the Lisp
compiler.

>> So Lisp is fine for embedding DSLs where performance is not so important
> 
> No, you're wrong here as well.  Performance is actually one of the
> reasons why a DSL can be so potent.  It can take advantage of the
> special characteristics of the domain to create code that can be _far_
> faster than what you can build with a general purpose language.  The
> reason is the compiler for the DSL can make optimizations which can't
> be made otherwise because the general purpose language doesn't have
> the information available to allow it to prove that the optimization
> will result in correct code.  So, it doesn't get done.

I've just described precisely the converse.

>> This obviates the possibility of completely statically typing all
>> Lisp programs.
> 
> True, but only by accident - your reasoning is completly wrong.

Would you care to elaborate?

> Also, 
> in this context (embdedded ML/OCaml) you aren't dealing with "all lisp
> programs", so the statement is irrelevant.

There was no context - my statement was general.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3y86t6gff.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> jayessay wrote:
> > Jon Harrop <······@jdh30.plus.com> writes:
> > 
> >> Yes. It is possible to write Lisp code so that embedded ML code can be
> >> compiled to Lisp code. However, it would be very difficult (IMHO,
> >> impossible with current technology) to make that Lisp code compile to
> >> native code that is as efficient as if the ML had been compiled directly
> >> (e.g. by MLton or ocamlopt)
> > 
> > Why?  That might be true but I see no obvious reason to believe this.
> 
> I'm not sure that you can convey all of the type information to the Lisp
> compiler.

Think of it this way: Clearly the "type information" is conveyed by
the OCaml compiler to your AMD64 (or whatever you're using).  There is
no reason why the embedded version in CL can't do the same.  You may
need to a) use an RTL or b) generate the equivalent "objects" as
just hunks of bytes that the RTL can then deal with directly.  I'm not
saying any of this makes sense from a _practical_ point of view
because the effort far exceeds any possible benefit.  But clearly it
can be done.


> >> So Lisp is fine for embedding DSLs where performance is not so important
> > 
> > No, you're wrong here as well.  Performance is actually one of the
> > reasons why a DSL can be so potent.  It can take advantage of the
> > special characteristics of the domain to create code that can be _far_
> > faster than what you can build with a general purpose language.  The
> > reason is the compiler for the DSL can make optimizations which can't
> > be made otherwise because the general purpose language doesn't have
> > the information available to allow it to prove that the optimization
> > will result in correct code.  So, it doesn't get done.
> 
> I've just described precisely the converse.

OK, you lost me.


> > Also, 
> > in this context (embdedded ML/OCaml) you aren't dealing with "all lisp
> > programs", so the statement is irrelevant.
> 
> There was no context - my statement was general.

OK, but in that case your statement was also irrelevant.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430a1267$0$97131$ed2619ec@ptn-nntp-reader03.plus.net>
jayessay wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> I'm not sure that you can convey all of the type information to the Lisp
>> compiler.
> 
> Think of it this way: Clearly the "type information" is conveyed by
> the OCaml compiler to your AMD64 (or whatever you're using).

The type information is used by the OCaml compiler when generating AMD64
assembler, yes. Some type information is also buried in the generated code
and data for the OCaml run time (e.g. the GC).

> There is 
> no reason why the embedded version in CL can't do the same.  You may
> need to a) use an RTL or b) and generate the equivalent "objects" as
> just hunks of bytes that the RTL can then deal with directly.  I'm not
> saying any of this makes sense from a _practical_ point of view
> because the effort far exceeds any possible benefit.  But clearly it
> can be done.

Yes, but that trade-off is critical. If you implement most of a native-code
ML compiler, as you suggest, then you will lose most of the benefits of
embedding in Lisp:

1. It will be extremely difficult and time consuming to write, when ease of
implementation was a motivating factor.
2. The interface between Lisp and OCaml code is likely to be unnecessarily
complicated, when interoperability was a motivating factor.
3. The generated code may even be platform specific (e.g. AMD64).
4. The generated code will be Lisp-compiler (and even version) specific.
5. You would need to know a huge amount about the transformations performed
by the Lisp compiler itself, in order to exploit them.

Even if you can get this low-level Lisp to compile to efficient native code,
you still don't have a run-time environment that can exploit the type
information. OCaml's run time contains support for various forms of type
information, such as specialised float records and arrays, polymorphic
variant constructors, polymorphic values, objects and so on.

So you've put in all of the effort required to write an ML compiler but have
not reaped all of the benefits.

The conclusion is that embedding your languages in Lisp is a bad idea if you
want the best possible performance. That is not to say that medium
performance languages (e.g. not quite as fast as SBCL-compiled Lisp) are
not amenable to embedding in Lisp. So there are still a huge number of DSLs
that can be productively embedded in Lisp.

Also, for a seasoned Lisp programmer, I have seen no evidence that ML or any
other language would be better for writing a high-performance compiler. So
Lisp is not comparatively worse, its just that you chose an incredibly
difficult task.

From a practical point of view, your best bet would probably be to write a
Lisp-OCaml interface and use both Lisp and OCaml compilers.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3u0hg6670.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> jayessay wrote:
> > Jon Harrop <······@jdh30.plus.com> writes:
> >> I'm not sure that you can convey all of the type information to the Lisp
> >> compiler.
> > 
> > Think of it this way: Clearly the "type information" is conveyed by
> > the OCaml compiler to your AMD64 (or whatever you're using).
> 
> The type information is used by the OCaml compiler when generating AMD64
> assembler, yes. Some type information is also buried in the generated code
> and data for the OCaml run time (e.g. the GC).
> 
> > There is 
> > no reason why the embedded version in CL can't do the same.  You may
> > need to a) use an RTL or b) and generate the equivalent "objects" as
> > just hunks of bytes that the RTL can then deal with directly.  I'm not
> > saying any of this makes sense from a _practical_ point of view
> > because the effort far exceeds any possible benefit.  But clearly it
> > can be done.
> 
> Yes, but that trade-off is critical. If you implement most of a native-code
> ML compiler, as you suggest, then you will lose most of the benefits of
> embedding in Lisp:

Well, there aren't actually any benefits, so you really can't lose
them.  To the extent there were any, the above won't lose anything.


> 1. It will be extremely difficult and time consuming to write, when ease of
> implementation was a motivating factor.

No, ease of implementation was not/wouldn't be a factor because OCaml
doesn't bring anything to the table.  While Prolog does, for example.
There's just nothing there that isn't basically already in CL.  So
that's just irrelevant to the discussion.  The whole point of this was
to indicate that with CL you could _embed_ OCaml in it via the use of
macros.  This was just to get you to understand an important aspect of
Lisp macros, and how very different this is from the "invoke outside
lexer/parser or preprocessor hack".  Unfortunately the effort failed.


> 2. The interface between Lisp and OCaml code is likely to be unnecessarily
> complicated, when interoperability was a motivating factor.

No, this will be quite smooth, that is the basic point.  And you still
don't understand it.


> 3. The generated code may even be platform specific (e.g. AMD64).

No, it would not be, unless you actually generated the native machine
code instead of passing it off to the Lisp compiler, but that wouldn't
make much sense.


> 4. The generated code will be Lisp-compiler (and even version) specific.

What, the end native code?  Well, duh!  But the macro code
implementing the thing would likely be nearly 100% portable CL.  So,
this point isn't just wrong, it makes no sense at all.


> 5. You would need to know a huge amount about the transformations performed
> by the Lisp compiler itself, in order to exploit them.

Not at all.  And it is strange at best that you would think this.


> Even if you can get this low-level Lisp to compile to efficient native code,
> you still don't have a run-time environment that can exploit the type
> information. OCaml's run time contains support for various forms of type
> information, such as specialised float records and arrays, polymorphic
> variant constructors, polymorphic values, objects and so on.

BFD, that would not be anywhere near the larger part of the work.  And
given I mention the likely need for an RTL, this point is totally
irrelevant.


> So you've put in all of the effort required to write an ML compiler but have
> not reaped all of the benefits.

There are no benefits.  And BTW, can you even _read_?  As I pointed
out in the text quoted above, this entire thing makes no sense from a
practical point of view since there are no benefits, since there isn't
any interesting computing paradigm in ML/OCaml that would even make
sense for CL.


> The conclusion is that embedding your languages in Lisp is a bad idea if you
> want the best possible performance. That is not to say that medium
> performance languages (e.g. not quite as fast as SBCL-compiled Lisp) are
> not amenable to embedding in Lisp. So there are still a huge number of DSLs
> that can be productively embedded in Lisp.

Your conclusions are laughable and clearly show you don't have even
the ghost of an idea of what's involved here.


> From a practical point of view, your best bet would probably be to write a
> Lisp-OCaml interface and use both Lisp and OCaml compilers.

I see no benefit in that at all.  Much more practical and useful would
be a Lisp-C++ interface and that is indeed being worked on (THANK YOU
Lisp NYC!)


I don't really have any more time to waste on your nonsense, so I
guess I will withdraw and let some other sucker take over.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Jens Axel Søgaard
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4309e802$0$634$edfadb0f@dread12.news.tele.dk>
Jon Harrop wrote:
> Ulrich Hobelmann wrote:
> 
>>Jon Harrop wrote:
>>
>>>The problem is that ML trades macros and dynamic typing for a faster
>>>run-time. With the converse you can easily implement a dynamically typed
>>
>>No, macros are purely syntactic.
> 
> No. If they are as powerful as Lisp's then they interfere with the type
> system.

Can you explain that in more detail?

-- 
Jens Axel Søgaard
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3oe7q6kk1.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> jayessay wrote:
> > It's easier to explain how it is _similar_ to that, since the
> > differences are everywhere.  Sure, a macro (with all attendant
> > supporting functions and what not) could implement an OCaml compiler
> > with the usual components of lexer, parser, semantic analyzer, IL
> > generator, optimizer and code generator.
> > 
> > Of course this would be crazy, but here's a _big_ difference that
> > hints at why this is so different from your "calling [out to] a lexer
> > and parser".  You could then freely intermix OCaml and Lisp.  Use
> > OCaml for its more restricted domain of use and drop into CL when you
> > need the extra power and then back again, etc.
> 
> The problem with recreating OCaml as a DSL within Lisp is that the
> pseudo-OCaml code would not benefit from the real OCaml's runtime.

There might be some benefit there, but not necessarily.  As an analogy
Prolog in CL examples which don't go all the way to a WAM
implementation lose relative to a native Prolog.  But if you go to
(all the trouble and make) a WAM the result will be quite competitive.
You're probably confused into thinking that the result is interpreted.
No, I mean it would be compiled (and then via the Lisp compiler to
native code).  In the OCaml case, most of the trouble would likely be
in the type inference; I don't see it losing much if anything here.


> I think you'd need an evaluator and a camlp4 macro to interpret quoted Lisp
> code. If it worked then the interface would be nasty because the Lisp code
> would produce ASTs and you'd have to dynamically type check whilst
> extracting the contents.

I think this will end up pretty haphazard and/or strictly an
interpreter.  The camlp4 stuff is a hacked-on preprocessor that is
outside the language.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Marco Antoniotti
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <QSpOe.53$DJ5.70033@typhoon.nyu.edu>
jayessay wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
> 
> 
>>jayessay wrote:
>>
>>>It's easier to explain how it is _similar_ to that, since the
>>>differences are everywhere.  Sure, a macro (with all attendant
>>>supporting functions and what not) could implement an OCaml compiler
>>>with the usual components of lexer, parser, semantic analyzer, IL
>>>generator, optimizer and code generator.
>>>
>>>Of course this would be crazy, but here's a _big_ difference that
>>>hints at why this is so different from your "calling [out to] a lexer
>>>and parser".  You could then freely intermix OCaml and Lisp.  Use
>>>OCaml for its more restricted domain of use and drop into CL when you
>>>need the extra power and then back again, etc.
>>
>>The problem with recreating OCaml as a DSL within Lisp is that the
>>pseudo-OCaml code would not benefit from the real OCaml's runtime.
> 
> 
> There might be some benefit there, but not necessarily.  As an analogy
> Prolog in CL examples which don't go all the way to a WAM
> implementation lose relative to a native Prolog.  But if you go to
> (all the trouble and make) a WAM the result will be quite competitive.

Just for the record.  The Prolog implementation that comes with LW is 
WAM based.

Cheers
--
Marco
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4309f5bc$0$97131$ed2619ec@ptn-nntp-reader03.plus.net>
jayessay wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> The problem with recreating OCaml as a DSL within Lisp is that the
>> pseudo-OCaml code would not benefit from the real OCaml's runtime.
> 
> There might be some benefit there, but not necessarily.  As an analogy
> Prolog in CL examples which don't go all the way to a WAM
> implementation lose relative to a native Prolog.  But if you go to
> (all the trouble and make) a WAM the result will be quite competitive.
> You're probably confused into thinking that the result is interpreted.

No, I am assuming the embedded OCaml is compiled via Lisp to native code.

> No, I mean it would be compiled (and then via the Lisp compiler to
> native code).  In the OCaml case, most of the trouble would likely be
> > in the type inference, I don't see it losing much if anything here.

Partly, I'm worried about the type information that would be lost because it
could not be conveyed to the Lisp compiler, which would then generate
native code with run-time type checks. For example, what are the Lisp
equivalents of OCaml's polymorphic variants and objects? I doubt the latter
are the same as Lisp's, they are certainly very different from C++ and
Java's objects.

>> I think you'd need an evaluator and a camlp4 macro to interpret quoted
>> Lisp code. If it worked then the interface would be nasty because the
>> Lisp code would produce ASTs and you'd have to dynamically type check
>> whilst extracting the contents.
> 
> I think this will end up pretty haphazard and/or strictly an
> interpreter.

No. I'd have thought you could use camlp4 to compile Lisp code into OCaml
code and then compile that into native code using the OCaml compiler. So,
like the OCaml-in-Lisp example, this would also compile to native code.

However, conveying the results of static typing to a Lisp compiler via
generated code is likely to result in slow OCaml-in-Lisp code and dynamic
typing of Lisp-in-OCaml code, although easy, would incur a similar cost
unless you included the static typing optimisations found in Lisp
compilers.

So I wouldn't expect good performance in either direction. Thanks to Lisp's
simple syntax, I expect it would be substantially easier to get
Lisp-in-OCaml working with reasonable efficiency. And no, I'm not talking
about the whole of CL. ;-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m37jed7wig.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> jayessay wrote:
> > No, I mean it would be compiled (and then via the Lisp compiler to
> > native code).  In the OCaml case, most of the trouble would likely be
> > in the type inference, I don't see it losing much if anything here.
> 
> Partly, I'm worried about the type information that would be lost because it
> could not be conveyed to the Lisp compiler

Why in the world would you think it can't be conveyed?  There seems to
be a fundamental flaw in your thinking to arrive at that conclusion.

>, which would then generate native code with run-time type
>checks.

No, the point is this would in fact generate code w/o such checks.
You don't seem to understand this.  Note that such an implementation
may need an RTL to achieve this in the general case.


> For example, what are the Lisp equivalents of OCaml's polymorphic
> variants and objects? I doubt the latter are the same as Lisp's,
> they are certainly very different from C++ and Java's objects.

And?


> >> I think you'd need an evaluator and a camlp4 macro to interpret quoted
> >> Lisp code. If it worked then the interface would be nasty because the
> >> Lisp code would produce ASTs and you'd have to dynamically type check
> >> whilst extracting the contents.
> > 
> > I think this will end up pretty haphazard and/or strictly an
> > interpreter.
> 
> No. I'd have thought you could use camlp4 to compile Lisp code into OCaml
> code and then compile that into native code using the OCaml compiler.

You're confused.  I wasn't talking about that.  I'm talking about you
can't intermix the two (at even the subexpression level).  So, you
have a cobbled-up affair.

> So, like the OCaml-in-Lisp example, this would also compile to
> native code.

Possible, but irrelevant


> However, conveying the results of static typing to a Lisp compiler via
> generated code is likely to result in slow OCaml-in-Lisp code and dynamic

Only because you are confused.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430a0167$0$97131$ed2619ec@ptn-nntp-reader03.plus.net>
jayessay wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> jayessay wrote:
>> Partly, I'm worried about the type information that would be lost because
>> it could not be conveyed to the Lisp compiler
> 
> Why in the world would you think it can't be conveyed?

Lisp doesn't have equivalents for most of OCaml's types. You will need to
know something about OCaml's type system to understand what I said.

>> No. I'd have thought you could use camlp4 to compile Lisp code into OCaml
>> code and then compile that into native code using the OCaml compiler.
> 
> You're confused.  I wasn't talking about that.  I'm talking about you
> can't intermix the two (at even the subexpression level).  So, you
> have a cobbled-up affair.

The OCaml and embedded Lisp could call each other, if that's what you mean.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m33bp17vct.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> jayessay wrote:
> > Jon Harrop <······@jdh30.plus.com> writes:
> >> jayessay wrote:
> >> Partly, I'm worried about the type information that would be lost because
> >> it could not be conveyed to the Lisp compiler
> > 
> > Why in the world would you think it can't be conveyed?
> 
> Lisp doesn't have equivalents for most of OCaml's types. You will need to
> know something about OCaml's type system to understand what I said.

Yes, but that is irrelevant.  As I said, you may need an RTL go along
with the compiler (similar to having a WAM).


> >> No. I'd have thought you could use camlp4 to compile Lisp code into OCaml
> >> code and then compile that into native code using the OCaml compiler.
> > 
> > You're confused.  I wasn't talking about that.  I'm talking about you
> > can't intermix the two (at even the subexpression level).  So, you
> > have a cobbled-up affair.
> 
> The OCaml and embedded Lisp could call each other, if that's what
> you mean.

No, that is not what I mean.  You can freely mix them at the source
level.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Julian Squires
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <WcnOe.78344$Ph4.2464932@ursa-nb00s0.nbnet.nb.ca>
On 2005-08-22, Jon Harrop <······@jdh30.plus.com> wrote:
> Partly, I'm worried about the type information that would be lost because it
> could not be conveyed to the Lisp compiler, which would then generate
> native code with run-time type checks. For example, what are the Lisp
> equivalents of OCaml's polymorphic variants and objects? I doubt the latter
> are the same as Lisp's, they are certainly very different from C++ and
> Java's objects.

Ah, this is what you weren't understanding about my previous points.
The whole point of linking to the CMUCL type inference manual is to show
that, with sufficient typing information inserted (such as an ML-on-CL
system would insert into the code it generates), those run-time type
checks are not done.  Try it yourself with the manual in one hand and
(disassemble #'foo) in the other.

This is one of the keys to getting even more performance out of your
raytracer implementation.  One important benefit of this ML-on-CL scheme
over hand-coded Lisp implementations is that the ML translator would be
more complete about inserting typing information.

> So I wouldn't expect good performance in either direction. Thanks to Lisp's
> simple syntax, I expect it would be substantially easier to get
> Lisp-in-OCaml working with reasonable efficiency. And no, I'm not talking
> about the whole of CL. ;-)

Of course, the burden is then on the implementation to provide dynamic
aspects of Lisp which aren't easily done in ML, like replacing functions
interactively.  Noting how one would implement ML in CL, as above,
demonstrates how OCaml trades flexibility for efficiency in its design.

Cheers.

-- 
Julian Squires
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430a0259$0$97131$ed2619ec@ptn-nntp-reader03.plus.net>
Julian Squires wrote:
> Ah, this is what you weren't understanding about my previous points.
> The whole point of linking to the CMUCL type inference manual is to show
> that, with sufficient typing information inserted (such as an ML-on-CL
> system would insert into the code it generates), those run-time type
> checks are not done.  Try it yourself with the manual in one hand and
> (disassemble #'foo) in the other.
> 
> This is one of the keys to getting even more performance out of your
> raytracer implementation.  One important benefit of this ML-on-CL scheme
> over hand-coded Lisp implementations is that the ML translator would be
> more complete about inserting typing information.

You can only annotate the resulting Lisp code with type declarations that
are valid in Lisp. I do not believe Lisp can represent many of OCaml's
types statically, so the easiest solution would be to resort to run-time
type checking.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Tayssir John Gabbour
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124736761.859102.114490@o13g2000cwo.googlegroups.com>
Jon Harrop wrote:
> Julian Squires wrote:
> > Ah, this is what you weren't understanding about my previous points.
> > The whole point of linking to the CMUCL type inference manual is to show
> > that, with sufficient typing information inserted (such as an ML-on-CL
> > system would insert into the code it generates), those run-time type
> > checks are not done.  Try it yourself with the manual in one hand and
> > (disassemble #'foo) in the other.
> >
> > This is one of the keys to getting even more performance out of your
> > raytracer implementation.  One important benefit of this ML-on-CL scheme
> > over hand-coded Lisp implementations is that the ML translator would be
> > more complete about inserting typing information.
>
> You can only annotate the resulting Lisp code with type declarations that
> are valid in Lisp. I do not believe Lisp can represent many of OCaml's
> types statically, so the easiest solution would be to resort to run-time
> type checking.

Slightly offtopic...

Qi is a functional language embedded in Common Lisp, and might be
closer to your interests. It purports to (formally speaking) use the
most powerful type system in existence.
http://www.lambdassociates.org/

Here's a little tutorial for ML programmers.
http://www.lambdassociates.org/qiml.htm

And here they explain points of confusion.
http://www.lambdassociates.org/studies/study03.htm

I have no opinion on it, as I haven't yet found enough time to delve
into it.


Tayssir
From: Julian Squires
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <o2oOe.78358$Ph4.2465677@ursa-nb00s0.nbnet.nb.ca>
On 2005-08-22, Jon Harrop <······@jdh30.plus.com> wrote:
> Julian Squires wrote:
>> Ah, this is what you weren't understanding about my previous points.
>> The whole point of linking to the CMUCL type inference manual is to show
>> that, with sufficient typing information inserted (such as an ML-on-CL
>> system would insert into the code it generates), those run-time type
>> checks are not done.  Try it yourself with the manual in one hand and
>> (disassemble #'foo) in the other.
>> 
>> This is one of the keys to getting even more performance out of your
>> raytracer implementation.  One important benefit of this ML-on-CL scheme
>> over hand-coded Lisp implementations is that the ML translator would be
>> more complete about inserting typing information.
>
> You can only annotate the resulting Lisp code with type declarations that
> are valid in Lisp. I do not believe Lisp can represent many of OCaml's
> types statically, so the easiest solution would be to resort to run-time
> type checking.

I don't see why that would be the case.  Obviously polymorphic types and
variants would be resolved to single cases beforehand the same as they
are in the OCaml compiler, or checked at runtime where necessary.  The
boxing behavior (as described in the ocaml FFI documentation) is not
tremendously different from that of most Lisp compilers.  Lisp would get
used as a kind of intermediate language (like "C--" in the ocaml
compiler), except that it's my belief that one could leverage more of
Lisp and so have a higher level intermediate language, and so a simpler
ML "compiler".

In any case, I can see I'm not going to convince you with words, and I
don't have the kind of time to put into putting those words into code,
so we'll have to leave it at that.

-- 
Julian Squires
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430a1740$0$97131$ed2619ec@ptn-nntp-reader03.plus.net>
Julian Squires wrote:
> On 2005-08-22, Jon Harrop <······@jdh30.plus.com> wrote:
>> You can only annotate the resulting Lisp code with type declarations that
>> are valid in Lisp. I do not believe Lisp can represent many of OCaml's
>> types statically, so the easiest solution would be to resort to run-time
>> type checking.
> 
> I don't see why that would be the case.  Obviously polymorphic types and
> variants would be resolved to single cases beforehand the same as they
> are in the OCaml compiler, or checked at runtime where necessary.

Yes, its really objects and polymorphic variants that I'm worried about.

> The 
> boxing behavior (as described in the ocaml FFI documentation) is not
> tremendously different from that of most Lisp compilers.  Lisp would get
> used as a kind of intermediate language (like "C--" in the ocaml
> compiler),

The main OCaml compiler doesn't use C-- for code gen, but yes.

> except that it's my belief that one could leverage more of 
> Lisp and so have a higher level intermediate language, and so a simpler
> ML "compiler".

Appel describes a mini-ML that would suit this purpose well. However,
generating that would throw away a lot of type information that is used in
optimisation by ocamlopt.

> In any case, I can see I'm not going to convince you with words, and I
> don't have the kind of time to put into putting those words into code,
> so we'll have to leave it at that.

I think we basically agree now. In theory, you could probably get within 20%
of the real OCaml's performance if you did a good enough job but it would
require so much work (including code gen) that, in practice, you'd be
better off writing interfaces between Lisp-compiler compiled Lisp and
OCaml-compiler compiled OCaml.

As Peter says, the step to native code generation is somewhat easier in
Lisp. However, I wrote a JIT compiler that emitted OCaml code that was then
compiled by ocamlopt. It was easy to do and it worked well.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Julian Squires
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <8mpOe.78412$Ph4.2466668@ursa-nb00s0.nbnet.nb.ca>
On 2005-08-22, Jon Harrop <······@jdh30.plus.com> wrote:
> The main OCaml compiler doesn't use C-- for code gen, but yes.

It doesn't?  Maybe I've misunderstood something.  What does it currently
use as an intermediate language?

> I think we basically agree now. In theory, you could probably get within 20%
> of the real OCaml's performance if you did a good enough job but it would
> require so much work (including code gen) that, in practice, you'd be
> better off writing interfaces between Lisp-compiler compiled Lisp and
> OCaml-compiler compiled OCaml.

Well, no, I continue to disagree here, because I don't think it's
necessary to reinvent code generation in this case.

Cheers.

-- 
Julian Squires
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430a4451$0$97131$ed2619ec@ptn-nntp-reader03.plus.net>
Julian Squires wrote:
> On 2005-08-22, Jon Harrop <······@jdh30.plus.com> wrote:
>> The main OCaml compiler doesn't use C-- for code gen, but yes.
> 
> It doesn't?  Maybe I've misunderstood something.  What does it currently
> use as an intermediate language?

It's been a while since I looked but it uses its own code gen (there was a
trial with C-- and the code is in the repo, IIRC) and I think it uses two
intermediate languages of their own design.

>> I think we basically agree now. In theory, you could probably get within
>> 20% of the real OCaml's performance if you did a good enough job but it
>> would require so much work (including code gen) that, in practice, you'd
>> be better off writing interfaces between Lisp-compiler compiled Lisp and
>> OCaml-compiler compiled OCaml.
> 
> Well, no, I continue to disagree here, because I don't think it's
> necessary to reinvent code generation in this case.

Yes, I certainly wouldn't bother with code gen in practice.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Julian Squires
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <R0sOe.78533$Ph4.2468876@ursa-nb00s0.nbnet.nb.ca>
On 2005-08-22, Jon Harrop <······@jdh30.plus.com> wrote:
> Julian Squires wrote:
>> On 2005-08-22, Jon Harrop <······@jdh30.plus.com> wrote:
>>> The main OCaml compiler doesn't use C-- for code gen, but yes.
>> 
>> It doesn't?  Maybe I've misunderstood something.  What does it currently
>> use as an intermediate language?
>
> It's been a while since I looked but it uses its own code gen (there was a
> trial with C-- and the code is in the repo, IIRC) and I think it uses two
> intermediate languages of their own design.

Ah, thus the confusion.  Sorry about that, I didn't mean C-- as in
cminusminus.org, but rather the intermediate language designed by the
caml team, which also (confusingly) has the name C-- (Cmm).  Looking at
the ocaml 3.08.3 source just now, I see that the one I was talking about
is still in place, at least for the native compiler.  The bytecode
compiler appears to use an intermediate form called "lambda".

Anyway, just details at this point.
Cheers.

-- 
Julian Squires
From: Peter Seibel
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m2r7cluaag.fsf@gigamonkeys.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Julian Squires wrote:
>> Ah, this is what you weren't understanding about my previous
>> points.  The whole point of linking to the CMUCL type inference
>> manual is to show that, with sufficient typing information inserted
>> (such as an ML-on-CL system would insert into the code it
>> generates), those run-time type checks are not done.  Try it
>> yourself with the manual in one hand and (disassemble #'foo) in the
>> other.
>> 
>> This is one of the keys to getting even more performance out of
>> your raytracer implementation.  One important benefit of this
>> ML-on-CL scheme over hand-coded Lisp implementations is that the ML
>> translator would be more complete about inserting typing
>> information.
>
> You can only annotate the resulting Lisp code with type declarations
> that are valid in Lisp.

Well, since you can define your own types that doesn't limit it
much. However the real question is what the (Common Lisp) compiler
does with those declarations. You also probably don't know about
compiler macros which give the OCaml-embedded-in-Lisp compiler author
another nifty tool.

> I do not believe Lisp can represent many of OCaml's types
> statically, so the easiest solution would be to resort to run-time
> type checking.

Insofar as Lisp gets to play with the AST for as long as it wants
before turning it into Lisp code it seems strange to say that it can't
represent type information. Really it boils down to how close to the
metal we can get in Common Lisp. And the answer is pretty
close. However to generate maximally efficient code it might be
necessary to take advantage of knowledge of a particular Common Lisp
implementation, at the minimum, knowing that certain Common Lisp
constructs will be compiled into certain machine code, and at the
maximum using the lower-level mechanism the Lisp compiler uses to
generate machine code. At that point you're sort of cheating since
you're essentially doing your own code generation. But Common Lisp
excels for writing compilers because it makes the transition from an
easy-to-write, purely interpreted implementation of a language to this
kind of implementation dependent, practically-on-bare-metal,
optimizing compiler incredibly smooth--start with an interpreter, then
use macros to write a compiler that compiles to Common Lisp, then
improve your compiler to add type declarations to the generated code
so the Common Lisp compiler can generate more efficient machine code,
then write some compiler macros to implement certain kinds of
not-always-applicable optimizations, and finally start dipping into a
particular implementation's bag of tricks to get at the raw machine
code. At that point you will only be constrained by the need to play
nicely with the rest of the Lisp runtime--you will, for instance have
to play by whatever rules the Lisp GC uses. But even that limitation
only applies if you want to have an embedded compiler--one that
generates code that can run along with Lisp code in the same
image. Otherwise you just (heh, just) need to write a runtime for your
language and completely take over all machine code generation and you
can do whatever any compiler does. But again, you got there by a quite
nice evolution from a simple interpreter.

-Peter

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430a15c6$0$97131$ed2619ec@ptn-nntp-reader03.plus.net>
Peter Seibel wrote:
> Well, since you can define your own types that doesn't limit it
> much. However the real question is what the (Common Lisp) compiler
> does with those declarations.

Exactly.

> You also probably don't know about 
> compiler macros which give the OCaml-embedded-in-Lisp compiler author
> another nifty tool.

What do they do?

> Insofar as Lisp gets to play with the AST for as long as it wants
> before turning it into Lisp code it seems strange to say that it can't
> represent type information.

Yes. I should have said "convey type information to the Lisp compiler's
native code generator". As you say, you can represent the type information
in Lisp but I don't think you can fully exploit it.

> ... But again, you got there by a quite
> nice evolution from a simple interpreter.

Yes. You can "evolve" compilers in most other languages, except you would
define an AST type and write your own interpreter (30 lines of ML for a
small FPL).

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Duane Rettig
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4r7ckea94.fsf@franz.com>
Peter Seibel <·····@gigamonkeys.com> writes:

> Jon Harrop <······@jdh30.plus.com> writes:
>
>> Julian Squires wrote:
>>> Ah, this is what you weren't understanding about my previous
>>> points.  The whole point of linking to the CMUCL type inference
>>> manual is to show that, with sufficient typing information inserted
>>> (such as an ML-on-CL system would insert into the code it
>>> generates), those run-time type checks are not done.  Try it
>>> yourself with the manual in one hand and (disassemble #'foo) in the
>>> other.
>>> 
>>> This is one of the keys to getting even more performance out of
>>> your raytracer implementation.  One important benefit of this
>>> ML-on-CL scheme over hand-coded Lisp implementations is that the ML
>>> translator would be more complete about inserting typing
>>> information.
>>
>> You can only annotate the resulting Lisp code with type declarations
>> that are valid in Lisp.
>
> Well, since you can define your own types that doesn't limit it
> much.

But there are, after all, limitations - can you figure out what is
illegal about this deftype form according to the spec?

(deftype proper-list-of (&optional (car-type '*))
   `(or null
        (cons ,car-type (proper-list-of ,car-type))))


-- 
Duane Rettig    ·····@franz.com    Franz Inc.  http://www.franz.com/
555 12th St., Suite 1450               http://www.555citycenter.com/
Oakland, Ca. 94607        Phone: (510) 452-2000; Fax: (510) 452-0182   
From: Thomas Lindgren
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3y86tv61u.fsf@localhost.localdomain>
Jon Harrop <······@jdh30.plus.com> writes:

> You can only annotate the resulting Lisp code with type declarations that
> are valid in Lisp. I do not believe Lisp can represent many of OCaml's
> types statically, so the easiest solution would be to resort to run-time
> type checking.

Which types would those be? (And an ML-to-CL *compiler* should of
course abandon "the easiest solution" in favour of "the efficient
solution"! :-) The CL type system is found in Chapter 4 of the
HyperSpec, in particular 4.2.3 and surroundings may be of interest.

Were I to write an ML-to-CL compiler, it would probably mimic an ML
compiler up to some suitable point where actual code generation
occurs.  One could even take some suitable intermediate format of an
existing ML compiler and produce "low level" Lisp from that.

For example, if the hypothetical ML-to-CL compiler started out with
something like what MLton or TILT would produce internally --
defunctionalized, suitably specialized semi-or-wholly monomorphic code
with type annotations all over the place -- it would on the face of it
seem quite possible to do a good job in translating this into fast
CL. You would need to do competitive pattern match compilation, emit
suitable declarations for all primitive operations, and then possibly
tweak the code generator from there. (Use block compilation as needed,
eliminate tail recursion, declare the appropriate compiler flags, ...)

E.g., an ML int-int add might look like

 (declare (fixnum x y) (the fixnum (+ x y)))

since we know from ML that x and y must be fixed precision integers.
(NB: What about declaring the output to be a fixnum? I'm not sure what
this does to overflow checking -- some other declaration might be more
suitable. But that's a detail: if there is an overflow, the generated
Lisp code must emulate ML and signal an error, and after _that_, the
result is known to be a fixnum.)

I assume that an ML compiler also would reason about what cases
remain, e.g., if you know that x is a list and have just tested that x
is not nil, the code generator should basically infer that x _must_
from now on be a cons. Subsequent uses of x in the generated code
should then note that x must be a cons cell. The ML-to-CL compiler
should make use of the same information.

E.g., given

 (if (null x) expr1 expr2)

inside expr2, all uses of x should declare that x is a cons cell (and
perhaps more information than that, if the type is more
specific). This should then permit car and cdr to compile well, for
example. And maybe a sophisticated CL compiler like CMUCL could even
derive some of that by itself.

I'm not sure what sort of declarations would be needed to make
parametric polymorphic ML code compile well (if it can't be made
monomorphic). For starters, declaring the number of function arguments
might be useful to speed up the CL function call.

Best,
Thomas
-- 
Thomas Lindgren
"It's becoming popular? It must be in decline." -- Isaiah Berlin
 
From: Marco Antoniotti
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <fE5Ne.44$DJ5.69122@typhoon.nyu.edu>
Jon Harrop wrote:
> Peter Seibel wrote:
> 
>>Anything. E.g. a string containing the text of some other language to be
>>parsed and compiled:
>>...
> 
> 
> How is that different from calling a lexer and parser in any other language?
> 

In this case it isn't different.  The difference comes when you can do 
things like DEFINER in http://common-lisp.net/project/definer (shameless 
plug).  In any other language you have to resort to an "outside" 
processor or to lexing+parsing (always by an outside processor).

You can't win.  :)  Lisp (and Scheme and Dylan and to some extent Prolog 
and Smalltalk; there may be others) rule.


Cheers
--
marco
From: Peter Seibel
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m2slx7ujud.fsf@gigamonkeys.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Peter Seibel wrote:
>> Anything. E.g. a string containing the text of some other language to be
>> parsed and compiled:
>> ...
>
> How is that different from calling a lexer and parser in any other language?

Because the ability to do so is built into the compiler and can
operate at the level of individual expressions. For instance, given the
POSTFIX-MATH macro from my previous post I can write this function:

  (defun foo (a b)
    (* 10 (postfix-math "a b +")))

which compiles into exactly the same machine code as if I had written:

  (defun foo (a b)
    (* 10 (+ b a)))

I don't know about camlp4 so maybe this kind of thing is possible in
it as well. Though the fact that it is a file-level pre-processor
makes me wonder. Also, is it possible to compose camlp4 macros? For
example, suppose some guy down the hall writes this (trivial) control
construct macro:

  (defmacro while (condition &body body)
    `(loop (unless ,condition (return)) ,@body))

Now I write this function:

  (defun bar (a b)
    (while (< (postfix-math "a b +") 100)
      (format t "a: ~d; b: ~d~%" a b)
      (incf a (random 10))
      (incf b (random 5))))

His macro doesn't have to know anything about the "syntax" of my macro
and vice versa. Is there some easy way to do the equivalent in camlp4?

-Peter 

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Björn Lindberg
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430ee71d@news.cadence.com>
Jon Harrop wrote:
> jayessay wrote:
> 
>>Jon Harrop <······@jdh30.plus.com> writes:
>>
>>>Björn Lindberg wrote:
>>>
>>>>Can you give an example of another language with Lispy macros?
>>>
>>>OCaml has camlp4.
>>
>>Those don't have the same capability.
> 
> 
> Yes.

Given this:

Jon Harrop wrote (21 Aug 05:06):

 > [...] (well, if like me you don't know camlp4)

...and this:

Jon Harrop wrote (21 Aug 06:40):

 > [...] I don't know anything about macros (as we've just witnessed!)

Exactly on what basis are you making your claim that camlp4 has the 
same capability as Lisp macros?


Björn
From: ·········@gmail.com
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1125054500.007039.263010@f14g2000cwb.googlegroups.com>
Björn Lindberg wrote:
> Jon Harrop wrote:
> > jayessay wrote:
> >
> >>Jon Harrop <······@jdh30.plus.com> writes:
> >>
> >>>Björn Lindberg wrote:
> >>>
> >>>>Can you give an example of another language with Lispy macros?
> >>>
> >>>OCaml has camlp4.
> >>
> >>Those don't have the same capability.
> >
> >
> > Yes.
>
> Given this:
>
> Jon Harrop wrote (21 Aug 05:06):
>
>  > [...] (well, if like me you don't know camlp4)
>
> ...and this:
>
> Jon Harrop wrote (21 Aug 06:40):
>
>  > [...] I don't know anything about macros (as we've just witnessed!)
>
> Exactly on what basis are you making your claim that camlp4 have the
> same capability as Lisp macros?

He's trolling. He also wrote that Lisp is 10 times more verbose than ML
[1]. A rather strong claim for someone who also claims complete
ignorance of Lisp, isn't it?

[1] http://groups.google.com/group/comp.lang.lisp/msg/d2e5476b7fb7f39c
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430f031d$0$17469$ed2e19e4@ptn-nntp-reader04.plus.net>
·········@gmail.com wrote:
> He's trolling. He also wrote that Lisp is 10 times more verbose than ML
> [1]. A rather strong claim for someone who also claims complete
> ignorance of Lisp, isn't it?

Richard Fateman wrote the Lisp code, not me. If you think you could write
more succinct and efficient Lisp code then I'd like to see it.

> [1] http://groups.google.com/group/comp.lang.lisp/msg/d2e5476b7fb7f39c

That's a single observation based on three roughly equivalent programs
(implementing the core of Mathematica) in ~C, Lisp and OCaml. That does not
constitute "Lisp is 10 times more verbose than ML" as you say. As I've
already stated, LOC is overly harsh for Lisp (IMHO).

When faced with a claim that Lisp is better for writing DSLs, that
observation was the obvious counterexample.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430f01a6$0$17469$ed2e19e4@ptn-nntp-reader04.plus.net>
Björn Lindberg wrote:
> Exactly on what basis are you making your claim that camlp4 have the
> same capability as Lisp macros?

OCaml's camlp4 macros do not have the same capability as Lisp's macros.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mk1teF172p79U1@individual.net>
Jon Harrop wrote:
> Björn Lindberg wrote:
>> Can you give an example of another language with Lispy macros?
> 
> OCaml has camlp4. Many other languages (like Mathematica) have equivalent
> capabilities but they aren't called "macros".

A quick look at the camlp4 tutorials tells me "yuck!"  Nemerle macros 
are just as great.  Sure, Lisp macros have plenty of quasiquotation, but 
that's just one level of abstraction, instead of 10+ lexical symbols 
with meanings you have to learn (like Perl).

-- 
I believe in Karma.  That means I can do bad things to people
all day long and I assume they deserve it.
	Dogbert
From: Ivan Boldyrev
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <ntgrt2-ska.ln1@ibhome.cgitftp.uiggm.nsc.ru>
On 9205 day of my life Jon Harrop wrote:
> Björn Lindberg wrote:
>> Can you give an example of another language with Lispy macros?
>
> OCaml has camlp4.

O yeh, and C has yacc. :-7

-- 
Ivan Boldyrev

                       Perl is a language where 2 x 2 is not equal to 4.
From: Jens Axel Søgaard
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4304aaca$0$37082$edfadb0f@dread12.news.tele.dk>
Björn Lindberg wrote:
> Jon Harrop wrote:
> 
>>> Lisp is a bit tricky to place on this spectrum, since it contains
>>> uncommon abstraction tools like macros and whatnot.

>> There are several other languages with equivalent capabilities so Lisp
>> certainly isn't unique in this respect. Lisp is, however, much older than
>> the others.
> 
> Can you give an example of another language with Lispy macros?

Whether they are "Lispy" is a matter of opinion, but how about BigWig?

     <http://www.brics.dk/bigwig/tutorial/macro/>

   "Growing Languages with Metamorphic Syntax Macros"

   Claus Brabrand | Michael I. Schwartzbach
   PEPM'02

   We present our experiences with a syntax macro language which we claim
   forms a general abstraction mechanism for growing (domain-specific)
   extensions of programming languages. Our syntax macro language is
   designed to guarantee type safety and termination. A concept of
   metamorphisms allows the arguments of a macro to be inductively
   defined in a meta level grammar and morphed into the host language. We
   also show how the metamorphisms can be made to operate simultaneously
   on multiple parse trees at once. The result is a highly flexible
   mechanism for growing new language constructs without resorting to
   compile-time programming. In fact, whole new languages can be defined
   at surprisingly low cost. This work is fully implemented as part of
   the <bigwig> system for defining interactive Web services, but could
   find use in many other languages.

   <http://www.brics.dk/~mis/macro.ps>
   <http://www.brics.dk/~mis/macro.pdf>

-- 
Jens Axel Søgaard
From: Marco Antoniotti
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <rp2Ne.43$DJ5.69267@typhoon.nyu.edu>
Jens Axel Søgaard wrote:
> Björn Lindberg wrote:
> 
>> Jon Harrop wrote:
>>
>>>> Lisp is a bit tricky to place on this spectrum, since it contains
>>>> uncommon abstraction tools like macros and whatnot.
> 
> 
>>> There are several other languages with equivalent capabilities so Lisp
>>> certainly isn't unique in this respect. Lisp is, however, much older 
>>> than
>>> the others.
>>
>>
>> Can you give an example of another language with Lispy macros?
> 
> 
> Whether they are "Lispy" is a matter of opinion, but how about BigWig?

They are not Lispy in the sense that they are not part of the language.

> 
>     <http://www.brics.dk/bigwig/tutorial/macro/>
> 
>   "Growing Languages with Metamorphic Syntax Macros"
> 
>   Claus Brabrand | Michael I. Schwartzbach
>   PEPM'02
> 
>   We present our experiences with a syntax macro language which we claim
>   forms a general abstraction mechanism for growing (domain-specific)
>   extensions of programming languages. Our syntax macro language is
>   designed to guarantee type safety and termination. A concept of
>   metamorphisms allows the arguments of a macro to be inductively
>   defined in a meta level grammar and morphed into the host language. We
>   also show how the metamorphisms can be made to operate simultaneously
>   on multiple parse trees at once. The result is a highly flexible
>   mechanism for growing new language constructs without resorting to
>   compile-time programming. In fact, whole new languages can be defined
>   at surprisingly low cost. This work is fully implemented as part of
>   the <bigwig> system for defining interactive Web services, but could
>   find use in many other languages.
> 
>   <http://www.brics.dk/~mis/macro.ps>
>   <http://www.brics.dk/~mis/macro.pdf>
> 

They do not even consider (Common) Lisp macros.  This is a glaring omission.

Cheers
--
Marco
From: Förster vom Silberwald
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124272371.240740.231260@g47g2000cwa.googlegroups.com>
Jon Harrop wrote:

> Firstly, do you agree that languages are evolving to be more concise?
> Secondly, do you agree that more concise languages tend to have more
> complicated grammars? Finally, what other reason could drive this
> association?
>
> I believe that languages are evolving to be more concise and to have more
> complicated grammars. I can see no reason for complicating grammars unless
> it aids brevity/elegance/comprehensibility. So I see the evolution of
> natural and programming languages as a huge amount of evidence that
> complicated grammars are used to simplify the use of languages.
>
> IMHO, humans are very good at deciphering expressions written in complicated
> grammars, and this is why we make things easier for ourselves by
> complicating grammars. In particular, we are better at understanding many
> short expressions written in the context of a complicated grammar, rather
> than many long expressions written with a very simple grammar.

If you are speaking of OCaml I guess. What was your biggest program in
concise OCaml code you have ever completed?

I for myself write only short programs (a few thousand lines of code in
Scheme/Bigloo) since I am in academia.

But I am not convinced that OCaml will shine when you have to cope with
100000 lines of Ocaml code.

May short expressions and one liners are good for writing small
programs and benchmarks and algorithms. However, what are they good for
in really big projects. How easy is it to adapt one-liners and short
expressions? I mean what will you do your whole structure of  program
has changed? 

Schneewittchen
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43034572$0$97124$ed2619ec@ptn-nntp-reader03.plus.net>
Förster vom Silberwald wrote:
> Jon Harrop wrote:
>> IMHO, humans are very good at deciphering expressions written in
>> complicated grammars, and this is why we make things easier for ourselves
>> by complicating grammars. In particular, we are better at understanding
>> many short expressions written in the context of a complicated grammar,
>> rather than many long expressions written with a very simple grammar.
> 
> If you are speaking of OCaml I guess.

No, I meant in general.

> What was your biggest program in concise OCaml code you have ever
> completed? 

I'm still working on one that is ~20kLOC. That was originally written in C++
but the C++ version became too difficult to maintain. Even with the benefit
of hindsight, I do not know of any better way that I could have structured
the C++ program in order to make it maintainable.

> I for myself write only short programs (a few thousand lines of code in
> Scheme/Bigloo) since I am in academia.

Yes, most of my programs are probably ~2kLOC. For my PhD (in computational
physics), I wrote 1-10kLOC programs in C++. If I were to do it again, I
would write much smaller programs in OCaml to save time.

> But I am not convinced that OCaml will shine when you have to cope with
> 100000 lines of Ocaml code.

I think the OCaml compilers themselves are the first evidence to the
contrary. There are probably many other OCaml programs that exceed 100kLOC.
It won't be long before mine does and it is a high-performance graphics
application (i.e. not your typical FPL program).

> May short expressions and one liners are good for writing small
> programs and benchmarks and algorithms. However, what are they good for
> in really big projects. How easy is it to adapt one-liners and short
> expressions? I mean what will you do your whole structure of  program
> has changed?

The main reasons that OCaml is more concise than C++, say, are type
inference for small programs and modules, functors and higher-order
functions for large programs.

Similar arguments apply more generally to OCaml/SML/Haskell/Clean/Lisp vs
C/C++/Java/C#. I believe that functional languages require asymptotically
less code thanks to the extra "dimension" of factoring allowed by HOFs. I'm
sure everyone here will agree... :-)

As for OCaml vs Lisp, I don't know Lisp well enough to comment. All I can
say is that I'm finding it really easy to maintain ~20kLOC OCaml programs.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Pascal Bourguignon
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <87mzngbjsp.fsf@thalassa.informatimago.com>
Jon Harrop <······@jdh30.plus.com> writes:
>> But I am not convinced that OCaml will shine when you have to cope with
>> 100000 lines of Ocaml code.
>
> I think the OCaml compilers themselves are the first evidence to the
> contrary. There are probably many other OCaml programs that exceed 100kLOC.
> It won't be long before mine does and it is a high-performance graphics
> application (i.e. not your typical FPL program).

mldonkey is 220kLoC of Ocaml.

-- 
__Pascal Bourguignon__                     http://www.informatimago.com/
The rule for today:
Touch my tail, I shred your hand.
New rule tomorrow.
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mh8n8F16litbU2@individual.net>
Pascal Bourguignon wrote:
>> I think the OCaml compilers themselves are the first evidence to the
>> contrary. There are probably many other OCaml programs that exceed 100kLOC.
>> It won't be long before mine does and it is a high-performance graphics
>> application (i.e. not your typical FPL program).
> 
> mldonkey is 220kLoC of Ocaml.

I only used it once years ago, but back then it was kind of slow and had 
lots of memory leaks (yes, the kind of thing you'd only expect in 
languages without GC).

-- 
I believe in Karma.  That means I can do bad things to people
all day long and I assume they deserve it.
	Dogbert
From: Förster vom Silberwald
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124447457.304687.203980@g44g2000cwa.googlegroups.com>
Jon Harrop wrote:

> As for OCaml vs Lisp, I don't know Lisp well enough to comment. All I can
> say is that I'm finding it really easy to maintain ~20kLOC OCaml programs.

I think you had a bad start with Scheme/Common Lisp. They will leave
you go away if you stop counting parentheses. Really, your Emacs editor
is your friend (if you don't like Bigloo* and want to use some other
Scheme languages, then, there is more a general Emacs mode "Quaks"
available).

However, there will always be a serious fact: some people don't like
things. I for one don't like OCaml. I am not the guy who can cope with
a lot of idiosyncracies. Although, I like Scheme and maybe Common Lisp
for the matter.

You shouldn't be too distracted just in case you do not like Scheme or
CommonLisp.  It is nothing wrong supposed OCaml and ML-languages serve
your purpose. It is a pity that a lot of Schemers and Common Lispers
will never understand that not everyone likes their pet languages.

Schneewittchen
* see my other post in comp.lang.scheme (in the Stalin thread were I
would like to point out to you that every Scheme implementation has a
lot to offer).
From: Pascal Bourguignon
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <87ll31cehy.fsf@thalassa.informatimago.com>
Jon Harrop <······@jdh30.plus.com> writes:
> Firstly, do you agree that languages are evolving to be more concise?

No. Hebrew is more concise than English.  Even Latin is more concise.

> Secondly, do you agree that more concise languages tend to have more
> complicated grammars? 

Yes, somewhat.

> Finally, what other reason could drive this
> association?

Mu.

> I believe that languages are evolving to be more concise and to have more
> complicated grammars. I can see no reason for complicating grammars unless
> it aids brevity/elegance/comprehensibility. So I see the evolution of
> natural and programming languages as a huge amount of evidence that
> complicated grammars are used to simplify the use of languages.

If that was true, you'd still be speaking Latin, or even some older language.

$ wc <<EOF
Our Father, Who art in heaven,
Hallowed be Thy Name.
Thy Kingdom come.
Thy Will be done, on earth as it is in Heaven. 
Give us this day our daily bread.
And forgive us our trespasses,
as we forgive those who trespass against us.
And lead us not into temptation,
but deliver us from evil. Amen.
EOF
      9      56     294

$ wc <<EOF
Pater noster qui est in caelis,
sanctificetur nomen tuum;
adveniat regnum tuum;
fiat voluntas tua, sicut in caelo et in terra.
Panem nostrum cotidianum da nobis hodie,
et dimitte nobis debita nostra,
sicut et nos dimittimus debitoribus nostris;
et ne nos inducas in tentationem
Sed libera nos a malo. Amen
EOF
      9      50     306


English seems to be 11% less concise than Latin (in word count; it
probably would be in character count too if it had a more phonetic
orthography).  Let's switch to Latin!


> IMHO, humans are very good at deciphering expressions written in complicated
> grammars, and this is why we make things easier for ourselves by
> complicating grammars. In particular, we are better at understanding many
> short expressions written in the context of a complicated grammar, rather
> than many long expressions written with a very simple grammar.

I'm not so sure.

-- 
__Pascal Bourguignon__                     http://www.informatimago.com/
-----BEGIN GEEK CODE BLOCK-----
Version: 3.12
GCS d? s++:++ a+ C+++ UL++++ P--- L+++ E+++ W++ N+++ o-- K- w--- 
O- M++ V PS PE++ Y++ PGP t+ 5+ X++ R !tv b+++ DI++++ D++ 
G e+++ h+ r-- z? 
------END GEEK CODE BLOCK------
From: [Invalid-From-Line]
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <874q9pnm2b.fsf@kafka.homenet>
Pascal Bourguignon <····@mouse-potato.com> writes:

> Jon Harrop <······@jdh30.plus.com> writes:
> > Firstly, do you agree that languages are evolving to be more concise?
> 
> No. Hebrew is more concise than English.  Even Latin is more concise.

Right.
B: in
'a (ha if in isolation): the
cheder : room

Thus bacheder means "in the room"
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <zmrg8slj.fsf@ccs.neu.edu>
Jon Harrop <······@jdh30.plus.com> writes:

> I heard that infix parsers are rarely used not because infix is worse but
> because infix is unusual in Lisp code and increases incompatibility. Would
> you agree with that?

Infix parsers are rarely used for several reasons.  The increase in
incompatibility is one reason, but there are others:

     Infix macros are *hard*.

     You need to learn the prefix notation *anyway* (because code that
     operates on code needs to operate at the abstract syntax level,
     which in lisp is naturally prefix-notated lists).

     Infix only works well on binary operations (consider the
     popularity of ternary ? : operator).

>>> There is unquestionably a huge amount of evidence to the contrary. Most
>>> natural and programming languages have complicated grammars precisely
>>> because it simplifies their use and makes them easier to understand.
>> 
>> Would you please point us to evidence in this direction?
>
> Firstly, do you agree that languages are evolving to be more concise?

No.

> Secondly, do you agree that more concise languages tend to have more
> complicated grammars? 

Possibly.  They may simply have larger vocabularies.

> Finally, what other reason could drive this association?

Languages drift.  And as I mentioned before, bandwidth and error
correction are important factors.
From: Pascal Bourguignon
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <87fyt8bi3x.fsf@thalassa.informatimago.com>
Joe Marshall <···@ccs.neu.edu> writes:
> Infix parsers are rarely used for several reasons.  The increase in
> incompatibility is one reason, but there are others:
>
>      Infix macros are *hard*.
>
>      You need to learn the prefix notation *anyway* (because code that
>      operates on code needs to operate at the abstract syntax level,
>      which in lisp is naturally prefix-notated lists).
>
>      Infix only works well on binary operations (consider the
>      popularity of ternary ? : operator).

First, let me mention that this is a non-problem, for mathematical
expressions are the exception in programs.

Perhaps not in the first programs that were written to compute tables
of mathematical functions and to compute atomic bomb parameters, but
since then the most complex mathematical operation is computing the
percentage earned by sellers.


Then, for the few programs that need mathematical formula, I'd say
let's expand our horizon a little: mathematics notations are more
complex than mere infix.  There is a 2D geographic syntax, with
subexpressions written below, above, on the right, on the left, or any
combination, with change in font and size, and adorned by a large
assortment of symbols.

Writing a macro to convert infix notation to prefix is just a crude
pis-aller.  What you really want, is a macro that will convert complex
mathematical formula drawings into s-expressions.  

Have a look at the way DrScheme allows picture atoms in the REPL:
mathematician programmers want to draw their formula on such pictures.
Or perhaps they'd be happy with TeX input, but let the IDE render the
formula when displaying the lisp source.

See for example http://www-sop.inria.fr/lemme/Loic.Pottier/ei98.ps
There's also an older paper in the AIM serie about parsing these 2D
formula.



If mathematical formulas were really a serious objection to
s-expression, then that'd be the kind of parser macro that would have
been written.





(with-ob-lisp
    (defconstant |
                      pi
                 3 * ----
                      2
| (* 3/2 pi))
    (print |
                      pi
                 3 * ----
                      2
|))

   
-- 
__Pascal Bourguignon__                     http://www.informatimago.com/
Grace personified,
I leap into the window.
I meant to do that.
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4303ed7a$0$1310$ed2619ec@ptn-nntp-reader02.plus.net>
Pascal Bourguignon wrote:
> Writing a macro to convert infix notation to prefix is just a crude
> pis-aller.  What you really want, is a macro that will convert complex
> mathematical formula drawings into s-expressions.

Yes. This is exactly what Mathematica does and it is much better than any
other programming language I have used in that respect.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4305fb5e$0$17470$ed2e19e4@ptn-nntp-reader04.plus.net>
Joe Marshall wrote:
> Infix parsers are rarely used for several reasons.  The increase in
> incompatibility is one reason, but there are others:
> 
>      Infix macros are *hard*.

Do you mean it is difficult to implement infix in Lisp?

>      You need to learn the prefix notation *anyway* (because code that
>      operates on code needs to operate at the abstract syntax level,
>      which in lisp is naturally prefix-notated lists).

So Lisp is rather tied to the built-in prefix notation.

>      Infix only works well on binary operations

Yes.

>      (consider the popularity of ternary ? : operator).

I used to use that a fair bit. Are you saying it is popular or unpopular?

> Languages drift.  And as I mentioned before, bandwidth and error
> correction are important factors.

Do you think that syntax is a significant source of errors when programming?
I believe that my errors tend to be at higher levels, e.g. algorithms.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: ··············@hotmail.com
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124470454.597951.165180@g44g2000cwa.googlegroups.com>
Jon Harrop wrote:
> Joe Marshall wrote:
> > Infix parsers are rarely used for several reasons.  The increase in
> > incompatibility is one reason, but there are others:
> >
> >      Infix macros are *hard*.
>
> Do you mean it is difficult to implement infix in Lisp?

He didn't mention Lisp, specifically, did he? This is the kind of
perhaps-unintentionally-smarmy response that raised my hackles.

You need to back up and understand the full context.

Lisp macros generally take Lispy code (uniform prefix notation,
expressed as Lisp lists-within-lists), and use list processing
techniques to rearrange the code, doing computation as necessary based
on arbitrarily elaborate analysis of the list structure and contents,
to produce code suitable for the Lisp compiler which solves the
programmers' real problem from code that was originally expressed in
programmer-friendly terms.

Several things go "wrong" with this when you try to use infix or
Algol-style notation in this scheme.

One is that Algol notation introduces lots of non-uniformity in the
syntax. Variable declarations look very different from ordinary
procedural code. Mathematical expressions use all sorts of detailed
precedence and associativity rules. Lots of semicolons and commas and
different sorts of braces are used. Sometimes statement-like
expressions return values. Sometimes they don't.

The end result is that you need either to write very limited macros, or
you need to implement a full Algol-style parser to construct a
language-specific AST, in order for your "macro" to understand the full
meaning of your code.

Think about how much work it is, for instance, to get the C
preprocessor to produce code which can be used "anywhere" within a C
program. Lots of backslashes to combine lines. Extra braces and
parenthesis, and even gratuitous iteration notation to make code fit
into the C syntax tree, even if the C macro call is sitting, unbraced,
as the single statement in a "then" clause of an "if" statement.

All of that can be ignored with Lisp macros, because the syntax is so
uniform that there is no need to create an artificial AST to represent
it.

And, because macros both consume and produce the same kind of
Lispy-syntax, it is trivial to combine macros on top of macros on top
of macros, without worrying about any kind of "impedance mismatch"

To add object-orientation to C, Stroustrup had to write a whole
preprocessor, and introduce yet more punctuation to the language. CLOS
"adds" object orientation to Lisp without needing any new syntactic
form.

Historically, both Scheme and Dylan took different approaches to the
macro "problem." Dylan tried to combine Algol-like-infix syntax for
marketing appeal with Lisp-like macros for power. It didn't succeed in
the marketplace. Scheme provides a slightly safer mechanism, and
generally doesn't convince Common Lispers that there is much to be
gained by doing so.

Infix macros are "hard" because the same kind of non-uniformity that
makes sense for human readers (who have lots of math training and
linguistic experience compared to a computer) makes it much harder for
simple code to usefully "understand" and safely manipulate. Just to
throw out one example: Scheme compilers can be a few pages long. C++
compilers are major industrial projects.
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <d5o9c0jp.fsf@ccs.neu.edu>
Jon Harrop <······@jdh30.plus.com> writes:

> Joe Marshall wrote:
>> Infix parsers are rarely used for several reasons.  The increase in
>> incompatibility is one reason, but there are others:
>> 
>>      Infix macros are *hard*.
>
> Do you mean it is difficult to implement infix in Lisp?

No, *that's* easy.  A simple infix parser in Lisp is an undergraduate
homework assignment.

I mean that designing a *good* macro system for an infix language is
something worthy of a research paper:

    Daniel Weise and Roger Crew. Programmable syntax
    macros. In Proceedings of the SIGPLAN '93
    Conference on Programming Language Design and
    Implementation, pages 156-165, June 1993.

    @inproceedings{504285,
     author = {Jonathan Bachrach and Keith Playford},
     title = {The Java syntactic extender (JSE)},
     booktitle = {OOPSLA '01: Proceedings of the 16th ACM SIGPLAN conference on Object oriented programming, systems, languages, and applications},
     year = {2001},
     isbn = {1-58113-335-9},
     pages = {31--42},
     location = {Tampa Bay, FL, USA},
     doi = {http://doi.acm.org/10.1145/504282.504285},
     publisher = {ACM Press},
     address = {New York, NY, USA},
     }

And even these examples have limitations.

>>      You need to learn the prefix notation *anyway* (because code that
>>      operates on code needs to operate at the abstract syntax level,
>>      which in lisp is naturally prefix-notated lists).
>
> So Lisp is rather tied to the built-in prefix notation.

Yes and no.  Most computer language implementations operate on the
abstract syntax after parsing.  Some languages allow you to
programmatically manipulate the abstract syntax tree from within the
language.  In general, you need to do data-directed dispatch because
computer languages usually offer the user a choice between several
syntactic forms in a program.  For instance, most languages allow you
to nest expressions or use a variable in place of a literal.

You can define a `Code-DOM' --- a set of class objects representing
the various syntactic forms --- and this is popular these days, or you
can use a general-purpose data structure, like a list.  If you use a
list, it's most efficient to put a token in the first element that
describes the syntactic form.  A symbolic token makes debugging
easier.

I've just described the internal representation of the original Lisp
abstract syntax.  McCarthy wrote an evaluator for this representation
to demonstrate that the language was universal.  Steve Russell noticed
that you could use McCarthy's EVAL as an interpreter by just hooking
up the I/O.  The `drawback' was that you had to enter the code in
fully parenthesised prefix notation (the AST form) because the Lisp
parser wasn't done.  But it turned out that it wasn't much of a
drawback after all and some people actually liked it.  The original
Lisp syntax was abandoned.

Incidentally, it looked a bit like this, modulo some non-ascii
characters: 

 collect[z;n] = prog[[a;d;m;h]

 BEGIN     [null[z] -> return [DONE]
           h := car[z]
           a := car[h]
           d := cdr[h]
           m := mkd[h]

           [~m -> rplacd[mark[h];n]]
           [m \/ mkd[a] \/ mkd[d] ->
                 write[[m -> d;T -> n];
                       [mkd[a] -> v[a]; m -> n; T -> n+1];
                       [m -> d+1;mkd[d] -> v[d];T -> n+1];
           z := cdr[z]
           [~m /\ ~mkd[d] /\ ~mkd[a] -> z := cons[h;z]]
           [m \/ mkd[d] /\ ~mkd[a] -> z := cons[a;z]]
           [~m /\ ~mkd[d] -> z := cons[d;z]]
           [~m -> n := n+1]
           go[BEGIN]]

Which I'm sure you all recognize as Minsky's garbage collector
algorithm which he presented at the first ILC in 1963/4

>>      Infix only works well on binary operations
>
> Yes.
>
>>      (consider the popularity of ternary ? : operator).
>
> I used to use that a fair bit. Are you saying it is popular or unpopular?

It is definitely less popular than if...else, even in cases where it
is more appropriate.

> Do you think that syntax is a significant source of errors when programming?

Yes.

> I believe that my errors tend to be at higher levels, e.g. algorithms.

I rarely choose the wrong algorithm by accident.  I often have
misplaced semicolons in C or Java.

~jrm
From: Peter Seibel
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m2acjdpxvf.fsf@gigamonkeys.com>
Joe Marshall <···@ccs.neu.edu> writes:

> You can define a `Code-DOM' --- a set of class objects representing
> the various syntactic forms --- and this is popular these days, or you
> can use a general-purpose data structure, like a list.

It's also worth noting that even if you define a Code-DOM for your
language, you still need a way to represent arbitrary constructs that
don't exist in the base language. Hmmm, how should we represent
arbitrary constructs--maybe some kind of AST? Hmmm, what's a good way
to represent a completely generic AST node? How about a pair--we can
hold the node value in one side of the pair and then a linked list of
child nodes in the other side of the pair. But that's an s-expression
(i.e. a bunch of cons cells). So even if you have a Code-DOM, to
support arbitrary syntactic abstractions you'll still end up with
s-expressions.

-Peter

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43062da3$0$22943$ed2619ec@ptn-nntp-reader01.plus.net>
Joe Marshall wrote:
> ...

That was fascinating. Thanks! :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mmer4F17ahuuU1@individual.net>
Jon Harrop wrote:
> Joe Marshall wrote:
>> Infix parsers are rarely used for several reasons.  The increase in
>> incompatibility is one reason, but there are others:
>>
>>      Infix macros are *hard*.
> 
> Do you mean it is difficult to implement infix in Lisp?

No, others mentioned that *there are* infix parsers (reader macros) for 
Lisp.  Most people don't use them I guess :D

>>      You need to learn the prefix notation *anyway* (because code that
>>      operates on code needs to operate at the abstract syntax level,
>>      which in lisp is naturally prefix-notated lists).
> 
> So Lisp is rather tied to the built-in prefix notation.

Not at all.  Implement whatever syntax you want.  If you don't like Lisp 
syntax at all use a complete, different syntax (and language) like Dylan.

>>      (consider the popularity of ternary ? : operator).
> 
> I used to use that a fair bit. Are you saying it is popular or unpopular?

It's not exactly elegant IMHO.  I MUCH prefer Lisp's style of everything 
being an expression, so I can simply say (if (test bla) foo bar) 
*everywhere* I want, unlike C, which makes that stupid if-else/?: 
distinction (and both look ugly, and if-else requires ();{}s all over 
the place).

>> Languages drift.  And as I mentioned before, bandwidth and error
>> correction are important factors.
> 
> Do you think that syntax is a significant source of errors when programming?
> I believe that my errors tend to be at higher levels, e.g. algorithms.

True, an experienced C coder probably doesn't make syntax mistakes, or 
just ()s the code to be sure.  But I feel like in a straight-jacket when 
coding C.  Not because of memory management, but because of syntax (like 
the inability to use if and switch as an expression, the need to create 
a named function in order to pass it as a parameter, the awkward syntax 
for creating structures that I want to pass as arguments...).

-- 
I believe in Karma.  That means I can do bad things to people
all day long and I assume they deserve it.
	Dogbert
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43060b53$0$1300$ed2619ec@ptn-nntp-reader02.plus.net>
Ulrich Hobelmann wrote:
> Jon Harrop wrote:
>> Joe Marshall wrote:
>>>      Infix macros are *hard*.
>> 
>> Do you mean it is difficult to implement infix in Lisp?
> 
> No, others mentioned that *there are* infix parsers (reader macros) for
> Lisp.  Most people don't use them I guess :D

Then why are they "hard"? Perhaps I misunderstood Joe. I thought he meant it
was difficult to implement infix operators in Lisp. Now I'm thinking maybe
he meant it is easy to implement infix operators but it is then difficult
to write macros that use infix syntax?

Presumably that is a Lisp-specific problem because Mathematica has no
problem using infix notation...

>>>      You need to learn the prefix notation *anyway* (because code that
>>>      operates on code needs to operate at the abstract syntax level,
>>>      which in lisp is naturally prefix-notated lists).
>> 
>> So Lisp is rather tied to the built-in prefix notation.
> 
> Not at all.  Implement whatever syntax you want.  If you don't like Lisp
> syntax at all use a complete, different syntax (and language) like Dylan.

So you disagree with Joe saying that "lisp is naturally prefix-notated
lists". You believe that prefix/infix/postfix makes no difference in Lisp?

>>>      (consider the popularity of ternary ? : operator).
> 
> It's not exactly elegant IMHO.  I MUCH prefer Lisp's style of everything
> being an expression, so I can simply say (if (test bla) foo bar)
> *everywhere* I want, unlike C, which makes that stupid if-else/?:
> distinction (and both look ugly, and if-else requires ();{}s all over
> the place).

Yes, everything being an expression is definitely better.

>> Do you think that syntax is a significant source of errors when
>> programming? I believe that my errors tend to be at higher levels, e.g.
>> algorithms.
> 
> True, an experienced C coder probably doesn't make syntax mistakes, or
> just ()s the code to be sure.  But I feel like in a straight-jacket when
> coding C.  Not because of memory management, but because of syntax (like
> the inability to use if and switch as an expression, the need to create
> a named function in order to pass it as a parameter, the awkward syntax
> for creating structures that I want to pass as arguments...).

Yes. To be fair, you are trying to do functional programming in an
imperative language. Most people do the converse, trying to do imperative
programming in a functional language. :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: ··············@hotmail.com
Subject: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <1124471313.002498.258420@g43g2000cwa.googlegroups.com>
Jon Harrop wrote:
> Ulrich Hobelmann wrote:
> > Jon Harrop wrote:
> >> Joe Marshall wrote:
> >>>      Infix macros are *hard*.
> >>
> >> Do you mean it is difficult to implement infix in Lisp?
> >
> > No, others mentioned that *there are* infix parsers (reader macros) for
> > Lisp.  Most people don't use them I guess :D
>
> Then why are they "hard"? Perhaps I misunderstood Joe. I thought he meant it
> was difficult to implement infix operators in Lisp. Now I'm thinking maybe
> he meant it is easy to implement infix operators but it is then difficult
> to write macros that use infix syntax?
>
> Presumably that is a Lisp-specific problem because Mathematica has no
> problem using infix notation...

Mathematica takes a TOTALLY different approach to programming
abstraction. And I'd hardly consider the resulting dog's breakfast as
"no problem."

Look how much punctuation Mathematica introduces to keep "infix"
notation going.

// to use postfix notation
~ f ~ to use prefix functions as infix
double square brackets for indexing
/@ and //@
^=
:=
^:=
 /: with = (TagSet)
/: with := (TagSetDelayed)
single, double, triple underscore prefixes and suffixes
-> vs. :>
Hold, HoldForm, Literal, Unevaluated
_h
x_h
x_:v
x_h:v
x:pattern
pattern ? test
pattern1 | pattern2 | pattern3 ...
pattern /; condition

one could go on and on. I haven't even mentioned Mathematica's notation
for anonymous functions yet. None of this punctuation is used in
conventional mathematics. 

Blech.
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <430714e5$0$22945$ed2619ec@ptn-nntp-reader01.plus.net>
··············@hotmail.com wrote:
> Mathematica takes a TOTALLY different approach to programming
> abstraction. And I'd hardly consider the resulting dog's breakfast as
> "no problem."

You can write a term-level interpreter for Mathematica just as you can write
an interpreter for any other language. You can write programs in
Mathematica as you would in other languages. It is, after all, evaluated in
basically the same way.

> Look how much punctuation Mathematica introduces to keep "infix"
> notation going.
> ...

Just to clarify, many of the symbols that you've listed are simply infix
functions. You've also omitted a lot of Mathematica's syntax (which is
unusually complicated).

Mathematica can also be very concise. For example, the following squares
each element in a list "l":

  #^2&/@l

The OCaml equivalent is:

  List.map (fun x -> x*x) l

The C++ equivalent is:

  int sqr(int x) { return x*x; }
  transform(l.begin(), l.end(), l.begin(), sqr);

OCaml's grammar contains much of the complexity but also a great deal more
(such as the typing of polymorphic variants). To put things in perspective,
my (incomplete) Mathematica grammar was 200LOC and OCaml's grammar is
1,500LOC.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: ··············@hotmail.com
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <1124544459.086026.42560@f14g2000cwb.googlegroups.com>
Jon Harrop wrote:
> ··············@hotmail.com wrote:
> > Mathematica takes a TOTALLY different approach to programming
> > abstraction. And I'd hardly consider the resulting dog's breakfast as
> > "no problem."
>
> You can write a term-level interpreter for Mathematica just as you can write
> an interpreter for any other language. You can write programs in
> Mathematica as you would in other languages. It is, after all, evaluated in
> basically the same way.

For sufficiently complicated definitions of "basic", perhaps. The
Mathematica programs will have all sorts of little traps that you might
not find.

My personal "favorite" is Apply, where I had not defined a function
"g". Check out Mathematica's documentation

Apply[Plus, g[a,b]] --> a+b

WTF! Hey, what happened to my function g? And, amazingly enough, at
least some folks at Wolfram think this is what Lisp does, and they'll
start explaining to you about the "head" of an expression....

You can also write a Mathematica-like syntax parser in Lisp. Look up
Fateman's "mma" if you want to.

> Just to clarify, many of the symbols that you've listed are simply infix
> functions. You've also omitted a lot of Mathematica's syntax (which is
> unusually complicated).

And some of those "functions" affect the pattern *transformations* of
the expressions they define. That's not what Lisp folks mean when they
talk about functional programming.

> Mathematica can also be very concise. For example, the following squares
> each element in a list "l":
>
>   #^2&/@l

Yes, because Mathematica has the "feature" that it automatically
distributes functions over lists.
What you are doing is not the same as other programming environments.

> The OCaml equivalent is:
...

irrelevant. You seem to be seriously mixing up surface syntax with
evaluation semantics.

Yes, you can write programs in Mathematica very easily in a huge number
of various approaches. But will they come when you call them? Can you
really be sure what they do? That you can avoid name capture? That
things don't depend on the exact structure of the expressions you pass
in? That you really made the right choice between Hold and Unevaluated?
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <43074459$0$1288$ed2619ec@ptn-nntp-reader02.plus.net>
··············@hotmail.com wrote:
> My personal "favorite" is Apply, where I had not defined a function
> "g". Check out Mathematica's documentation
> 
> Apply[Plus, g[a,b]] --> a+b
> 
> WTF! Hey, what happened to my function g? And, amazingly enough, at
> least some folks at Wolfram think this is what Lisp does,

Let's try writing a Lisp macro equivalent to Mathematica's Apply.

In Mathematica, Apply may be defined as:

  Apply[h_, _[args___]] := h[args]

For example:

In[1]:= Apply[Plus, g[3]]

evaluates Plus[3] to give:

Out[1]= 3

Effectively, Mathematica's Apply replaces a call to one function (_) with a
call to another (h).

Here's my puny attempt at a Lisp conversion that only works for
single-argument functions:

  (defmacro mapply (`f `(g arg))
    `(,f ,arg))

Repeating the same example, Lisp gives a few warnings but it seems to work:

* (mapply `+ `(g 3))

; In: LAMBDA (#:WHOLE-1516 #:ENV-1517)

;   (LET* (# # # # #)
;     `(,F ,ARG))
; Note: Variable G defined but never used.
;
; Note: Variable QUOTE defined but never used.
;
3

> and they'll start explaining to you about the "head" of an expression....
> 
> You can also write a Mathematica-like syntax parser in Lisp. Look up
> Fateman's "mma" If you want to.

I've already looked at it.

>> Just to clarify, many of the symbols that you've listed are simply infix
>> functions. You've also omitted a lot of Mathematica's syntax (which is
>> unusually complicated).
> 
> And some of those "functions" affect the pattern *transformations* of
> the expressions they define. That's not what Lisp folks mean when they
> talk about functional programming.

If you mean the difference between Rule and RuleDelayed, for example, then I
believe that is probably just shorthand for quoting and not quoting in a
Lisp macro.

>> Mathematica can also be very concise. For example, the following squares
>> each element in a list "l":
>>
>>   #^2&/@l
> 
> Yes, because Mathematica has the "feature" that it automatically
> distributes functions over lists.

No. If you want a function to be threaded over its arguments when they are
lists then you must set the Listable attribute for that function. There are
many possible attributes. This is one piece of functionality that makes
Mathematica's pattern matching much more powerful than most other forms of
pattern matching. However, Mathematica's pattern matching can be far from
linear.

In that example, I used the map function which can be written "func /@ list"
in Mathematica's infix notation. The same infix notation can be defined in
OCaml or SML, for example:

# let ( /@ ) f l = List.map f l;;
val ( /@ ) : ('a -> 'b) -> 'a list -> 'b list = <fun>

> What you are doing is not the same as other programming environments.

Yes. Equivalently, what Lisp macros do is "not the same as other programming
environments".

> Yes, you can write programs in Mathematica very easily in a huge number
> of various approaches. But will they come when you call them? Can you
> really be sure what they do? That you can avoid name capture? That
> things don't depend on the exact structure of the expressions you pass
> in? That you really made the right choice between Hold and Unevaluated?

I wrote most of my PhD in Mathematica and it worked superbly. Mathematica
also has many happy users. Indeed, my old department are apparently looking
into a site license. From my point of view, the main source of bugs in
Mathematica code is due to the lack of static type checking.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: ··············@hotmail.com
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <1124554424.054080.170910@g14g2000cwa.googlegroups.com>
Jon Harrop wrote:
> ··············@hotmail.com wrote:
> > My personal "favorite" is Apply, where I had not defined a function
> > "g". Check out Mathematica's documentation
> >
> > Apply[Plus, g[a,b]] --> a+b
> >
> > WTF! Hey, what happened to my function g? And, amazingly enough, at
> > least some folks at Wolfram think this is what Lisp does,
>
> Let's trying writing a Lisp macro equivalent to Mathematica's Apply.

You miss the point entirely.  A wolf in sheep's clothing doesn't eat
grass.

Lisp has apply already, it is an ordinary function, and it takes as
arguments a function and a list (in the simplest case), and returns the
result of the function taking each element of the list as an argument.
It's extremely useful, and basic to the Lisp philosophy.

Mathematica has an "Apply" that specifies transformation rules that
take a symbol and an expression-tree and do surgery on the expression
tree to stick the symbol in.  That ain't function application in my
dictionary. I'm guessing it doesn't get used much in Mathematica
applications, but was stuck in because someone had a fuzzy memory of
Lisp or Scheme, and thought it might be nice to have.

You seem not to distinguish between "functions" and "symbols
Mathematica uses to name functions, but also have various attributes
which determine their pattern-matching semantics." That is symptomatic
of your confusion between ordinary programming semantics,
pattern-matching, and Lisp macro manipulations. Until you learn to keep
them distinct, comparing Lisp, OCaml, and Mathematica will be very
confusing.

I've used Mathematica for a reasonable amount of work as well, but
"getting work done" in Mathematica is very different from "programming"
in Lisp. It has a totally different design philosophy.
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <43075db8$0$1288$ed2619ec@ptn-nntp-reader02.plus.net>
··············@hotmail.com wrote:
> Lisp has apply already, it is an ordinary function, and it takes as
> arguments a function and a list (in the simplest case), and returns the
> result of the function taking each element of the list as an argument.
> It's extremely useful, and basic to the Lisp philosophy.

Lisp's "apply" is a special case of Mathematica's "Apply" that only works
when "g = List". Wade's macro is a better Lisp equivalent of Mathematica's
"Apply".

> Mathematica has an "Apply" that specifies transformation rules that
> take a symbol and an expression-tree and do surgery on the expression
> tree to stick the symbol in.  That ain't function application in my
> dictionary.

For example:

In[1]:= Apply[Plus, {1, 2, 3}]

Out[1]= 6

is equivalent to the Lisp:

* (apply '+ '(1 2 3))
6

Lisp's "apply" only handles lists. Mathematica's "Apply" handles any kind of
non-atomic expression. For example:

In[1]:= Apply[f, g[1, 2, 3]]

Out[1]= f[1, 2, 3]

> I'm guessing it doesn't get used much in Mathematica 
> applications, but was stuck in because someone had a fuzzy memory of
> Lisp or Scheme, and thought it might be nice to have.

I've used Apply (written "@@" in infix notation) quite a lot.

> You seem not to distinguish between "functions" and "symbols
> Mathematica uses to name functions, but also have various attributes
> which determine their pattern-matching semantics." That is symptomatic
> of your confusion between ordinary programming semantics,
> pattern-matching, and Lisp macro manipulations. Until you learn to keep
> them distinct, comparing Lisp, OCaml, and Mathematica will be very
> confusing.

It is an equivalence, not a "confusion".

> I've used Mathematica for a reasonable amount of work as well, but
> "getting work done" in Mathematica is very different from "programming"
> in Lisp. It has a totally different design philosophy.

Yes. I would imagine that most Lisp code is run-time and not macros.
Mathematica code is effectively all Lisp macros.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: ··············@hotmail.com
Subject: Re: Lisp semantics vs. Mathematica semantics
Date: 
Message-ID: <1124565642.851909.251130@g43g2000cwa.googlegroups.com>
Jon Harrop wrote:
> ··············@hotmail.com wrote:

>
> Lisp's "apply" is a special case of Mathematica's "Apply" that only works
> when "g = List". Wade's macro ...

Absolutely and completely not. The definition of a function INCLUDES
THE DOMAIN. Lisp apply takes a Lisp function object as one of the
arguments. It is a function, crudely speaking on the domain of

lisp-functions \cross lisp-value^n \to lisp-value

Mathematica's Apply acts on a different domain. The fact that Wade had
to define a LISP MACRO to simulate Mathematica Apply means that it
CANNOT be equivalent to Lisp apply, which is a LISP FUNCTION.

LISP MACROS do NOT include LISP FUNCTIONS as "special cases."
Macro-expander-functions act on the space

lisp-s-expressions \to lisp-s-expressions.

lisp-functions are NOT elements of lisp-s-expressions.

No Lisp macro can do the same thing as apply.

> Lisp's "apply" only handles lists.

There's no "only" about it. lisp #'apply takes functions and arguments
and, pay *careful* attention here, A-P-P-L-I-E-S the function to the
arguments.

It doesn't take an expression and re-arrange it to look nicer, it calls
a damn function.


> > You seem not to distinguish between "functions" and "symbols
> > Mathematica uses to name functions, but also have various attributes
> > which determine their pattern-matching semantics." That is symptomatic
> > of your confusion between ordinary programming semantics,
> > pattern-matching, and Lisp macro manipulations. Until you learn to keep
> > them distinct, comparing Lisp, OCaml, and Mathematica will be very
> > confusing.
>
> It is an equivalence, not a "confusion".

There is no equivalence, unless you expand your domain of definition to
the universe of Mathematica expressions. "Plus" in Mathematica is a set
of rules. There isn't a single function object that I can get there.

Compare to Lisp's #'+ . A simple function. No "Listable" attribute, no
rules indicating it is commutative, blah, blah, blah, just the function
that takes *numbers* and adds them to produce numbers. It doesn't take
expressions, it doesn't take polynomials, it takes *numbers* and
returns numbers. I will always get a number out (or an error condition)
if I apply it to something. No surprises, no non-deterministic
evaluation of the argument.

What's the domain of Mathematica's Plus? Absolutely anything.
Plus[godknowswhat] might evaluate to Plus[godknowswhat] or 3, if
godknowswhat happens to evaluate to 3.
From: Jon Harrop
Subject: Re: Lisp semantics vs. Mathematica semantics
Date: 
Message-ID: <4307d2b2$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
··············@hotmail.com wrote:
> ...
> No Lisp macro can do the same thing as apply.

So you do not think that Mathematica's evaluation strategy can be
implemented using Lisp macros? I was under the impression that we had
developed just that macro elsewhere in this thread. Can you explain why you
think it is wrong/different?

> Compare to Lisp's #'+ . A simple function. No "Listable" attribute, no
> rules indicating it is commutative, blah, blah, blah, just the function
> that takes *numbers* and adds them to produce numbers. It doesn't take
> expressions, it doesn't take polynomials, it takes *numbers* and
> returns numbers. I will always get a number out (or an error condition)
> if I apply it to something. No surprises, no non-deterministic
> evaluation of the argument.

Yes. I'm not interested in addition because I have that in other
languages...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Brian Downing
Subject: Re: Lisp semantics vs. Mathematica semantics
Date: 
Message-ID: <ZHSNe.293946$xm3.170252@attbi_s21>
In article <························@ptn-nntp-reader02.plus.net>,
Jon Harrop  <······@jdh30.plus.com> wrote:
> So you do not think that Mathematica's evaluation strategy can be
> implemented using Lisp macros? I was under the impression that we had
> developed just that macro elsewhere in this thread. Can you explain why you
> think it is wrong/different?

I don't understand your fascination for doing things with macros.

Okay, listen up:

A macro in Common Lisp is simply a function that transforms code.  By
definition you can do anything with macros that you can do with Lisp
functions.  The only difference is that the computations you do in a
macro definition might happen before runtime (i.e., the macros might be
expanded (the macro functions run) by COMPILE.)

See:

CL-USER 1 > (defun frobnicate (whole environment)
              (declare (ignore environment))
              (reverse (second whole)))
FROBNICATE

;;; KLUDGE - I'm not sure if the following is allowed by the CL spec,
;;; but it works here.
CL-USER 2 > (setf (macro-function 'frob1) #'frobnicate)
#'(LAMBDA (WHOLE ENVIRONMENT) (DECLARE (LAMBDA-NAME FROBNICATE) (IGNORE ENVIRONMENT)) (BLOCK FROBNICATE (REVERSE (SECOND WHOLE))))

CL-USER 3 > (frob1 (2 2 +))
4

CL-USER 4 > (defmacro frob2 (form)
              (reverse form))
FROB2

CL-USER 5 > (frob2 (2 2 +))
4

So yeah, you could write macros such that:

(macroexpand-1 '(deep-thought))

would spin the CPU for 7.5 million years and then spit out 42, but this
is a damn silly way to write code.

Sorry if this ruins the magic for you...

-bcd
-- 
*** Brian Downing <bdowning at lavos dot net> 
From: Jon Harrop
Subject: Re: Lisp semantics vs. Mathematica semantics
Date: 
Message-ID: <4307ffef$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Brian Downing wrote:
> I don't understand your fascination for doing things with macros.

I think I'm beginning to understand...

> Okay, listen up:
> 
> A macro in Common Lisp is simply a function that transforms code.  By
> definition you can do anything with macros that you can do with Lisp
> functions.

Yes, it's the vice-versa that I had misunderstood. I didn't realise ordinary
functions could do anything that macros could. My belief, of course,
stemmed from the separation of macros and code in OCaml...

> The only difference is that the computations you do in a 
> macro definition might happen before runtime (i.e., the macros might be
> expanded (the macro functions run) by COMPILE.)

Right.

> So yeah, you could write macros such that:
> 
> (macroexpand-1 '(deep-thought))
> 
> would spin the CPU for 7.5 million years and then spit out 42, but this
> is a damn silly way to write code.

Yes. This begs the question why use macros when you could just use ordinary
functions? My guess is that macros let you partially specialise your code.

> Sorry if this ruins the magic for you...

Not at all, thanks for the clarification.

So, at least to start with, I should probably forget macros altogether?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Brian Downing
Subject: Re: Lisp semantics vs. Mathematica semantics
Date: 
Message-ID: <EwTNe.52824$084.17012@attbi_s22>
In article <························@ptn-nntp-reader02.plus.net>,
Jon Harrop  <······@jdh30.plus.com> wrote:
> > So yeah, you could write macros such that:
> > 
> > (macroexpand-1 '(deep-thought))
> > 
> > would spin the CPU for 7.5 million years and then spit out 42, but this
> > is a damn silly way to write code.
> 
> Yes. This begs the question why use macros when you could just use ordinary
> functions? 

*rolls eyes*

Because calling functions by hand and then manually copying and pasting
the resultant generated code into your program would get old pretty
quick?

Perhaps you should read "The Story of Mac" here:

http://www.gigamonkeys.com/book/macros-defining-your-own.html

> > Sorry if this ruins the magic for you...
> 
> Not at all, thanks for the clarification.
> 
> So, at least to start with, I should probably forget macros altogether?

I think you should expand your knowledge of Common Lisp basics.  Go read
the above book.

-bcd
-- 
*** Brian Downing <bdowning at lavos dot net> 
From: Jon Harrop
Subject: Re: Lisp semantics vs. Mathematica semantics
Date: 
Message-ID: <43080a9c$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Brian Downing wrote:
> In article <························@ptn-nntp-reader02.plus.net>,
> Jon Harrop  <······@jdh30.plus.com> wrote:
>> Yes. This begs the question why use macros when you could just use
>> ordinary functions?
> 
> *rolls eyes*
> 
> Because calling functions by hand and then manually copying and pasting
> the resultant generated code into your program would get old pretty
> quick?
> 
> Perhaps you should read "The Story of Mac" here:
> 
> http://www.gigamonkeys.com/book/macros-defining-your-own.html

Ok. I've read that but it just seems to reinforce what I said before. Macros
are expanded before run-time so they provide partial specialisation. The
manual alternative is a HOF, not cut and paste. There is no difference in
result.

>> So, at least to start with, I should probably forget macros altogether?
> 
> I think you should expand you knowledge of Common Lisp basics.  Go read
> the above book.

That does look like a mighty fine book... :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Jon Harrop
Subject: Re: Lisp semantics vs. Mathematica semantics
Date: 
Message-ID: <4308941c$0$97102$ed2619ec@ptn-nntp-reader03.plus.net>
Jon Harrop wrote:
> Ok. I've read that but it just seems to reinforce what I said before.
> Macros are expanded before run-time so they provide partial
> specialisation. The manual alternative is a HOF, not cut and paste. There
> is no difference in result.

I think I just twigged. Macros are often used to partially specialise HOFs
but they can do a lot more, i.e. they make syntactic abstraction easier and
more efficient. Is that right?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Brian Downing
Subject: Re: Lisp semantics vs. Mathematica semantics
Date: 
Message-ID: <TRcOe.297290$xm3.266805@attbi_s21>
In article <·························@ptn-nntp-reader03.plus.net>,
Jon Harrop  <······@jdh30.plus.com> wrote:
> I think I just twigged. Macros are often used to partially specialise HOFs
> but they can do a lot more, i.e. they make syntactic abstraction easier and
> more efficient. Is that right?

Yep, pretty much.

For example, nicer-looking DSLs:

(loop for x below 10 and y from 25
      doing (format t "~A Hello world ~A" x y))

As opposed to a (badly thought out) theoretical HOF loop:

(loop '((:below 10)
        (:from 25))
      (lambda (x y) (format t "~A Hello world ~A" x y)))

-bcd
-- 
*** Brian Downing <bdowning at lavos dot net> 
From: Jon Harrop
Subject: Re: Lisp semantics vs. Mathematica semantics
Date: 
Message-ID: <43099863$0$17485$ed2e19e4@ptn-nntp-reader04.plus.net>
Brian Downing wrote:
> For example, nicer-looking DSLs:
> 
> (loop for x below 10 and y from 25
>       doing (format t "~A Hello world ~A" x y))
> 
> As opposed to a (badly thought out) theoretical HOF loop:
> 
> (loop '((:below 10)
>         (:from 25))
>       (lambda (x y) (format t "~A Hello world ~A" x y)))

I see. You can't do that in OCaml unless you use symbolic infix operators
(e.g. "=>") instead of words. SML allows arbitrary infix operators to be
defined, even with associativities and precedences. So you can do things
like that in SML:

http://www.mlton.org/ForLoops

I assume this still isn't as general as a Lisp macro though. It certainly
seems like a bit of a hack...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Paul F. Dietz
Subject: Re: Lisp semantics vs. Mathematica semantics
Date: 
Message-ID: <-K6dnWkh7PcIKJTeRVn-vg@dls.net>
Jon Harrop wrote:

> I assume this still isn't as general as a Lisp macro though.

As an example of what you can do with lisp macros,
look at Waters' COVER package.  It (temporarily)
shadows some of the defining macros so they install
code to record branch coverage.  Doing the same thing
in other languages typically requires separate
preprocessors or hacking the compiler.

http://www.merl.com/publications/TR1991-004/

	Paul
From: Nathan Baum
Subject: Re: Lisp semantics vs. Mathematica semantics
Date: 
Message-ID: <dedrp9$2nr$1@news7.svr.pol.co.uk>
Jon Harrop wrote:
> Jon Harrop wrote:
> 
>>Ok. I've read that but it just seems to reinforce what I said before.
>>Macros are expanded before run-time so they provide partial
>>specialisation. The manual alternative is a HOF, not cut and paste. There
>>is no difference in result.
> 
> 
> I think I just twigged. Macros are often used to partially specialise HOFs
> but they can do a lot more, i.e. they make syntactic abstraction easier and
> more efficient. Is that right?
> 

Quite correct.

Consider

   (defstruct foo
     bar)

And compare with something roughly equivalent:

   (defun make-foo (&key bar)
     (list 'foo 'bar bar))
   (defun foo-p (foo)
     (and (consp foo)
       (eq (car foo) 'foo)))
   (deftype foo () '(satisfies foo-p))
   (defun foo-bar (foo)
     (declare (foo foo))
     (etypecase foo
       (foo (getf (cdr foo) 'bar))))
   (defsetf foo-bar (foo) (val)
     (declare (foo foo))
     `(etypecase ,foo
       (foo (setf (getf (cdr ,foo) 'bar) ,val))))
   (internal::register-struct-reader 'foo #'make-foo)

Aside from the issue that structures should be distinct types, whilst 
this manual expansion of defstruct has 'foo' being a particular form of 
list, this is plainly far more verbose than is desirable for a simple 
structure definition.

It's also inefficient, and not only because the structures are 
represented as lists: additionally a "satisfies" type definition might 
well confuse your type inferer, so calls to foo-bar where you /know/ the 
argument is a foo will probably still involve a runtime type check.

Additionally, it also relies upon nonportable knowledge of how to 
register itself with the structure reader: The structure can be read as 
a literal with the syntax #S(FOO :BAR <VALUE>). Note that the 
registering function in my example is made up and is unlikely to work on 
any Lisp implementation anywhere.

You /could/ use a HOF,

   (hof-defstruct 'foo 'bar)

But that would mean losing whatever type inference you may get, and 
therefore the efficiency gains from type inference, since there's no way 
that the "foo" type would be known at compile time:

Plus, you'd get warnings from the compiler about undefined functions, 
since make-foo, foo-bar, et al, wouldn't be defined until runtime.

Another important distinction between macros in Lisp and macros in OCaml 
is that whilst both Lisp macros and camlp4 can perform arbitrary 
transformations upon ASTs of their respective languages, Lisp macros 
produce Lisp whilst camlp4 macros produce OCaml.

Lisp has a variety of ugly low-level control structures which are left 
out of OCaml because they are subject to abuse, or don't sit well with 
the functional paradigm. OCaml lacks block/return-from, catch/throw (not 
related to try/raise), and tagbody/go.

In retrospect this makes sense: OCaml didn't have camlp4 initially, and 
so the designers didn't plan on encapsulating such forms within 
user-defined higher-level forms.

On GameDev, I gave an example of a simple exception handling system 
implemented in only tens of lines of code.

Lisp itself has a very sophisticated _condition_ system which supports 
not only the usual uncorrectable unignorable conditions of OCaml, C++, 
and many other languages, but also allows for conditions which may be 
ignored (these are basically _warnings_) and also for _restarts_, which 
allow a function which handles a condition to indicate that processing 
may continue further down the call stack.

Despite the complexity of Lisp's condition system, a Lisp without one 
(but otherwise complete) could be given an exact clone using macros. It 
would likely only amount to a few hundred lines of code.

It would be nontrivial to do the equivalent in campl4, because OCaml 
itself doesn't have the non-local control flow structures that you need 
to build a condition system as sophisticated as Lisp's. OCaml's own 
exception handling can imitate it to a degree, but when you introduce 
warnings and restarts, it becomes
From: Pascal Bourguignon
Subject: Re: Lisp semantics vs. Mathematica semantics
Date: 
Message-ID: <87r7cl5tn2.fsf@thalassa.informatimago.com>
Nathan Baum <···········@btinternet.com> writes:

> Jon Harrop wrote:
>> Jon Harrop wrote:
>> 
>>>Ok. I've read that but it just seems to reinforce what I said before.
>>>Macros are expanded before run-time so they provide partial
>>>specialisation. The manual alternative is a HOF, not cut and paste. There
>>>is no difference in result.
>> I think I just twigged. Macros are often used to partially
>> specialise HOFs
>> but they can do a lot more, i.e. they make syntactic abstraction easier and
>> more efficient. Is that right?
>> 
>
> Quite correct.
>
> Consider
>
>    (defstruct foo
>      bar)
>
> And compare with something roughly equivalent:
>
>    (defun make-foo (&key bar)
>      (list 'foo 'bar bar))
>    (defun foo-p (foo)
>      (and (consp foo)
>        (eq (car foo) 'foo)))
>    (deftype foo () '(satisfies foo-p))
>    (defun foo-bar (foo)
>      (declare (foo foo))
>      (etypecase foo
>        (foo (getf (cdr foo) 'bar))))
>    (defsetf foo-bar (foo) (val)
>      (declare (foo foo))
>      `(etypecase ,foo
>        (foo (setf (getf (cdr ,foo) 'bar) ,val))))
>    (internal::register-struct-reader 'foo #'make-foo)
> [...]

"Roughly". Instead of noting the deficiencies, why don't you show some
laziness and ask your favorite Common Lisp implementation what it thinks
of it:

[100]> (macroexpand '(defstruct foo bar))
(EVAL-WHEN (:LOAD-TOPLEVEL :COMPILE-TOPLEVEL :EXECUTE)
 (LET NIL
  (LET
   ((#:G4750
     (CONS 'FOO (CLOS::CLASS-NAMES (GET 'STRUCTURE-OBJECT 'CLOS::CLOSCLASS)))))
   (SYSTEM::STRUCTURE-UNDEFINE-ACCESSORIES 'FOO)
   (REMPROP 'FOO 'SYSTEM::DEFSTRUCT-DESCRIPTION)
   (CLOS::DEFINE-STRUCTURE-CLASS 'FOO #:G4750 'MAKE-FOO 'NIL 'COPY-FOO 'FOO-P
    (LIST
     (CLOS::MAKE-INSTANCE-<STRUCTURE-EFFECTIVE-SLOT-DEFINITION>
      CLOS::<STRUCTURE-EFFECTIVE-SLOT-DEFINITION> :NAME 'BAR :INITARGS '(:BAR)
      :TYPE 'T :ALLOCATION ':INSTANCE 'CLOS::INHERITABLE-INITER
      (CLOS::MAKE-INHERITABLE-SLOT-DEFINITION-INITER 'NIL
       (SYSTEM::MAKE-CONSTANT-INITFUNCTION NIL))
      'CLOS::INHERITABLE-DOC '(NIL) 'CLOS::LOCATION '1 'CLOS::READONLY 'NIL))
    (LIST
     (CLOS::MAKE-INSTANCE-<STRUCTURE-DIRECT-SLOT-DEFINITION>
      CLOS::<STRUCTURE-DIRECT-SLOT-DEFINITION> :NAME 'BAR :INITARGS '(:BAR)
      :TYPE 'T :ALLOCATION ':INSTANCE 'CLOS::INHERITABLE-INITER
      (CLOS::MAKE-INHERITABLE-SLOT-DEFINITION-INITER 'NIL
       (SYSTEM::MAKE-CONSTANT-INITFUNCTION NIL))
      'CLOS::INHERITABLE-DOC '(NIL) :READERS '(FOO-BAR) :WRITERS
      '((SETF FOO-BAR)))))
   (DEFUN MAKE-FOO (&KEY (#:BAR NIL))
    (LET ((SYSTEM::OBJECT (SYSTEM::%MAKE-STRUCTURE #:G4750 2)))
     (SETF (SYSTEM::%STRUCTURE-REF 'FOO SYSTEM::OBJECT 1) (THE T #:BAR))
     SYSTEM::OBJECT)))
  (PROCLAIM '(INLINE FOO-P))
  (DEFUN FOO-P (SYSTEM::OBJECT) (SYSTEM::%STRUCTURE-TYPE-P 'FOO SYSTEM::OBJECT))
  (PROCLAIM '(INLINE COPY-FOO))
  (DEFUN COPY-FOO (STRUCTURE) (COPY-STRUCTURE STRUCTURE))
  (PROCLAIM '(FUNCTION FOO-BAR (FOO) T)) (PROCLAIM '(INLINE FOO-BAR))
  (DEFUN FOO-BAR (SYSTEM::OBJECT)
   (THE T (SYSTEM::%STRUCTURE-REF 'FOO SYSTEM::OBJECT 1)))
  (SYSTEM::%PUT 'FOO-BAR 'SYSTEM::DEFSTRUCT-READER 'FOO)
  (PROCLAIM '(FUNCTION (SETF FOO-BAR) (T FOO) T))
  (PROCLAIM '(INLINE (SETF FOO-BAR)))
  (DEFUN (SETF FOO-BAR) (SYSTEM::VALUE SYSTEM::OBJECT)
   (SYSTEM::%STRUCTURE-STORE 'FOO SYSTEM::OBJECT 1 SYSTEM::VALUE))
  (SYSTEM::%PUT 'FOO-BAR 'SYSTEM::DEFSTRUCT-WRITER 'FOO)
  (SYSTEM::%SET-DOCUMENTATION 'FOO 'TYPE NIL)
  (CLOS::DEFSTRUCT-REMOVE-PRINT-OBJECT-METHOD 'FOO) 'FOO)) ;
T

Of course, an implementation could as well macroexpand it to:

(EVAL-WHEN (:LOAD-TOPLEVEL :COMPILE-TOPLEVEL :EXECUTE)
  (system::define-structure 'foo '(bar)))

so the argument on the size of the expansion must include the called
functions too...  That is what we summarize with the term "abstraction".


> You /could/ use a HOF,
>
>    (hof-defstruct 'foo 'bar)
>
> But that would mean losing whatever type inference you may get, and
> therefore the efficiency gains from type inference, since there's no
> way that the "foo" type would be known at compile time:

What about:
  (EVAL-WHEN (:LOAD-TOPLEVEL :COMPILE-TOPLEVEL :EXECUTE)
    (hof-defstruct 'foo 'bar)) 
?

The important points, the two defining points, are that macros get run
at compilation time, and in the current lexical environment, (or at
minimal-compilation time in the case of an interpreter).

If EVAL could catch the lexical environment and EVAL-WHEN could
function deep in s-expressions (instead of only on the toplevel), then
we could do without macros:

(mapcar
  (lambda (x)
    (eval-in-current-environment
      (eval-when (:compile)
        (hof-cond '((oddp x) (print :odd))
                  '((oddp (truncate x 2)) (print :doubly-odd))))))
  '(1 2 3 4))

But you'd still be wanting macros to hide this (eval/ce (eval-when ...)) stuff.



-- 
__Pascal Bourguignon__                     http://www.informatimago.com/
Our enemies are innovative and resourceful, and so are we. They never
stop thinking about new ways to harm our country and our people, and
neither do we. -- Georges W. Bush
From: Nathan Baum
Subject: Re: Lisp semantics vs. Mathematica semantics
Date: 
Message-ID: <dee1vv$h4b$1@newsm1.svr.pol.co.uk>
Pascal Bourguignon wrote:
> Nathan Baum <···········@btinternet.com> writes:
> 
> 
>>Jon Harrop wrote:
>>
>>>Jon Harrop wrote:
>>>
>>>
>>>>Ok. I've read that but it just seems to reinforce what I said before.
>>>>Macros are expanded before run-time so they provide partial
>>>>specialisation. The manual alternative is a HOF, not cut and paste. There
>>>>is no difference in result.
>>>
>>>I think I just twigged. Macros are often used to partially
>>>specialise HOFs
>>>but they can do a lot more, i.e. they make syntactic abstraction easier and
>>>more efficient. Is that right?
>>>
>>
>>Quite correct.
>>
>>Consider
>>
>>   (defstruct foo
>>     bar)
>>
>>And compare with something roughly equivalent:
>>
>>   (defun make-foo (&key bar)
>>     (list 'foo 'bar bar))
>>   (defun foo-p (foo)
>>     (and (consp foo)
>>       (eq (car foo) 'foo)))
>>   (deftype foo () '(satisfies foo-p))
>>   (defun foo-bar (foo)
>>     (declare (foo foo))
>>     (etypecase foo
>>       (foo (getf (cdr foo) 'bar))))
>>   (defsetf foo-bar (foo) (val)
>>     (declare (foo foo))
>>     `(etypecase ,foo
>>       (foo (setf (getf (cdr ,foo) 'bar) ,val))))
>>   (internal::register-struct-reader 'foo #'make-foo)
>>[...]
> 
> 
> "Roughly". Instead of noting the deficiencies, why don't you show some
> laziness and ask your favorite Common Lisp implementation what it thinks
> of it:

Mainly because my Common Lisp implementation (SBCL) doesn't expand it 
into something which I could enter into the reader again. And I figured 
it would be more useful to show what _usable_ code which did the same 
thing would look like.

I could have written

   (eval (macroexpand '(defstruct foo bar)))

but that would really have missed the point, I think.

>>You /could/ use a HOF,
>>
>>   (hof-defstruct 'foo 'bar)
>>
>>But that would mean losing whatever type inference you may get, and
>>therefore the efficiency gains from type inference, since there's no
>>way that the "foo" type would be known at compile time:
> 
> 
> What about:
>   (EVAL-WHEN (:LOAD-TOPLEVEL :COMPILE-TOPLEVEL :EXECUTE)
>     (hof-defstruct 'foo 'bar)) 
> ?

Ah yes. I haven't fully grokked EVAL-WHEN yet.
From: Bruce Stephens
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <87wtmgv5jb.fsf@cenderis.demon.co.uk>
Jon Harrop <······@jdh30.plus.com> writes:

[...]

> In[1]:= Apply[Plus, {1, 2, 3}]
>
> Out[1]= 6

That makes sense.

[...]

> Lisp's "apply" only handles lists. Mathematica's "Apply" handles any kind of
> non-atomic expression. For example:
>
> In[1]:= Apply[f, g[1, 2, 3]]
>
> Out[1]= f[1, 2, 3]

That strikes me as bizarre, unless g is some kind of data-structuring
operation (such as list)?

(apply '+ (list 1 2 3)) => 6

[...]

> Yes. I would imagine that most Lisp code is run-time and not macros.
> Mathematica code is effectively all Lisp macros.

Macros aren't common at runtime, no.  That's rather the point: they're
expanded before then.
From: ··············@hotmail.com
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <1124566339.513748.50970@g47g2000cwa.googlegroups.com>
Bruce Stephens wrote:
> Jon Harrop <······@jdh30.plus.com> writes:.
>
> [...]. Mathematica's "Apply" handles any kind of
> > non-atomic expression. For example:
> >
> > In[1]:= Apply[f, g[1, 2, 3]]
> >
> > Out[1]= f[1, 2, 3]
>
> That strikes me as bizarre, unless g is some kind of data-structuring
> operation (such as list)?

No, the point is that *if g doesn't have any applicable transformation
rules*, g[1,2,3] returns the same expression, which is, in Lisp
notation '(g 1 2 3). Mathematica Apply takes the result of evaluating
its argument and removes the "head" (which I've notated as the car of
the Lisp list, g).

If g[1,2,3] is defined to transform to List[2,4,6], (in Lisp '(list 2 4
6) ), then Apply will remove the head "List" and substitute the first
argument "f" resulting in '(f 2 4 6). Then, because Mathematica keeps
evaluating stuff until it stops, '(f 2 4 6) will often be evaluated
by applying the function f to the arguments. E.g. if f were Plus,
'(plus 2 4 6) would return 12.

Of course this is utterly freaky to any Lisp programmer, so it is a
good thing they include such an example in the documentation. Apply is
a special-case transformation that exploits the treatment of the "head"
in expressions as a function call. Unless it is not a function call, of
course, in which case, you get the bizarre behavior.
From: Bruce Stephens
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <87slx4v12b.fsf@cenderis.demon.co.uk>
···············@hotmail.com" <············@gmail.com> writes:

[...]

> Of course this is utterly freaky to any Lisp programmer, so it is a
> good thing they include such an example in the documentation.  Apply
> is a special-case transformation that exploits the treatment of the
> "head" in expressions as a function call.  Unless it is not a
> function call, of course, in which case, you get the bizarre
> behavior.

Oh, I think I see.  So Apply[f, g[1,2,3]] is kind of a substitution:
applying f rather than g to the arguments?  OK, I can see that.  

(I still don't like the capitalised function names and square brackets
and things, but those are well known idiosyncrasies of Mathematica;
presumably they're no more annoying once you get used to them than
parentheses are to people who don't use lisp.)
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <4307c5a7$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Bruce Stephens wrote:
> ···············@hotmail.com" <············@gmail.com> writes:
> Oh, I think I see.  So Apply[f, g[1,2,3]] is kind of a substitution:
> applying f rather than g to the arguments?  OK, I can see that.

Yes, exactly. You can imagine how useful things like this are in the context
of manipulating symbolic mathematical expressions.

> (I still don't like the capitalised function names and square brackets
> and things, but those are well known idiosyncrasies of Mathematica;
> presumably they're no more annoying once you get used to them than
> parentheses are to people who don't use lisp.)

Yes. That's true of all languages, of course. :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: ··············@hotmail.com
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <1124584833.462029.84280@g43g2000cwa.googlegroups.com>
Jon Harrop wrote:
> Bruce Stephens wrote:
> > ···············@hotmail.com" <············@gmail.com> writes:
> > Oh, I think I see.  So Apply[f, g[1,2,3]] is kind of a substitution:
> > applying f rather than g to the arguments?  OK, I can see that.
>
> Yes, exactly. You can imagine how useful things like this are in the context
> of manipulating symbolic mathematical expressions.

Yes, he is correct, but it is only slightly useful in manipulating
symbolic expressions. Compared to the amazing power of apply (making
functions first class data), it's just a bit of list surgery:
(defun math-apply (f arg) (cons f (cdr arg))), where all the macrology
being done in the thread is an attempt to replicate Mathematica's
aggressive *evaluation model* which is NOT a part of Apply. Does that
really qualify as "useful"? Not really. Or you have a very low
threshold for usefulness.

Things like defmacro and Lisp apply are exceedingly useful. I maintain
that Mathematica Apply is a poor substitute.

In your other post, you claim that it is now apparent you could write
something like Mathematica's evaluator in Lisp. While true, you still
seem not to understand that Lisp has a perfectly good evaluator (called
eval, and well-defined by the standard), while Mathematica's evaluator
is an ill-specified, non-deterministic pattern matching engine.

Just because eval and apply have similarly named "false cognates"
Evaluate and Apply in Mathematica does not mean they are the same, or
even similar in their utility or patterns of usage. Mathematica
supports Lispy programming *very* poorly. Lisp supports Lispy
programming *very* well.

For the record, we should make clear that the macro "pattern matching"
in Lisp you've seen in the examples so far is a simple tree-matching
algorithm, as in destructuring-bind, and bears only the faintest
resemblance to Mathematica pattern substitution (or, as far as I know,
OCaml pattern-matching).

For god's sake, there is a lot to learn from Lisp, if you would *LEARN*
it instead of making simplistic drive-by comparisons. Lisp is a whole
programming culture with a rich heritage. Mathematica and OCaml are
upstarts by comparison. Mathematica, for certain, is a classic case of
"in computer science, we stand on our predecessors toes."
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <4307d1a1$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
··············@hotmail.com wrote:
> In your other post, you claim that it is now apparent you could write
> something like Mathematica's evaluator in Lisp. While true, you still
> seem not to understand that Lisp has a perfectly good evaluator (called
> eval, and well-defined by the standard), while Mathematica's evaluator
> is an ill-specified, non-deterministic pattern matching engine.

I believe Lisp's evaluator produces values whereas Mathematica's evaluator
produces ASTs.

> Just because eval and apply have similarly named "false cognates"
> Evaluate and Apply in Mathematica does not mean they are the same, or
> even similar in their utility or patterns of usage. Mathematica
> supports Lispy programming *very* poorly. Lisp supports Lispy
> programming *very* well.

Yes, absolutely.

> For the record, we should make clear that the macro "pattern matching"
> in Lisp you've seen in the examples so far is a simple tree-matching
> algorithm, as in destructuring-bind, and bears only the faintest
> resemblance to Mathematica pattern substitution (or, as far as I know,
> OCaml pattern-matching).

OCaml is certainly totally unrelated (camlp4 probably is related but I don't
know it). However, I think Lisp macros and Mathematica are quite similar
here. Specifically, it may be possible to implement Mathematica's pattern
matcher in Lisp as a set of macros.

> For god's sake, there is a lot to learn from Lisp, if you would *LEARN*
> it instead of making simplistic drive-by comparisons. Lisp is a whole
> programming culture with a rich heritage. Mathematica and OCaml are
> upstarts by comparison. Mathematica, for certain, is a classic case of
> "in computer science, we stand on our predecessors toes."

I'm really interested in the functionality that Lisp provides that is not
already available to me (e.g. in C, OCaml and Mathematica). So symbolic
manipulation using macros is very interesting but first class functions are
not.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Nathan Baum
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <de8h7o$c6$1@news6.svr.pol.co.uk>
Jon Harrop wrote:
> Bruce Stephens wrote:
> 
>>···············@hotmail.com" <············@gmail.com> writes:
>>Oh, I think I see.  So Apply[f, g[1,2,3]] is kind of a substitution:
>>applying f rather than g to the arguments?  OK, I can see that.
> 
> 
> Yes, exactly. You can imagine how useful things like this are in the context
> of manipulating symbolic mathematical expressions.

I can imagine how useful _explicit_ substitution might be. I simply 
can't imagine how

   Apply[f, g[1, 2, 3]]

not being

   f[g[1, 2, 3]]

might be useful in the general case. Particularly considering you could 
easily have something like

   Subst[g[x__] -> f[x__], g[1, 2, 3]]

and then define

   SubstHead[f, g[1, 2, 3]]

in terms of that.
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <4307cb9f$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Nathan Baum wrote:
> Jon Harrop wrote:
>> Yes, exactly. You can imagine how useful things like this are in the
>> context of manipulating symbolic mathematical expressions.
> 
> I can imagine how useful _explicit_ substitution might be.

Yes. If by "explicit substitution" you mean search and replace then that is
implemented like this:

In[1]:= ReplaceAll[g[1, 2, 3], {g -> f}]

Out[1]= f[1, 2, 3]

> I simply can't imagine how
> 
>    Apply[f, g[1, 2, 3]]
> 
> not being
> 
>    f[g[1, 2, 3]]

But if you wanted the latter then you would just write that rather than
using Apply.

> might be useful in the general case. Particularly considering you could
> easily have something like
> 
>    Subst[g[x__] -> f[x__], g[1, 2, 3]]
> 
> and then define
> 
>    SubstHead[f, g[1, 2, 3]]
> 
> in terms of that.

Yes, exactly. That is:

In[1]:= Replace[g[1, 2, 3], {g[args___] -> f[args]}]

Out[1]= f[1, 2, 3]

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Nathan Baum
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <de8jrr$q7n$1@newsm1.svr.pol.co.uk>
Jon Harrop wrote:
  >
>>I simply can't imagine how
>>
>>   Apply[f, g[1, 2, 3]]
>>
>>not being
>>
>>   f[g[1, 2, 3]]
> 
> 
> But if you wanted the latter then you would just write that rather than
> using Apply.

Whilst that's true, it doesn't IMO constitute a good reason for Apply to 
have what is to me highly counter-intuitive behaviour.

I would expect a function called Apply to apply its first argument to 
its second argument. If it did anything radically different I would be 
surprised, and I am.

The only reason I can imagine for Apply to even exist is if Mathematica 
has a seperate namespace for functions and variables. Then, you'd use 
Apply[f,g] in the same way that a Lisper would use (apply f g), and for 
the same reason.
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <4307d456$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Nathan Baum wrote:
> Whilst that's true, it doesn't IMO constitute a good reason for Apply to
> have what is to me highly counter-intuitive behaviour.

I think that is simply because you are used to "apply" in Lisp.

> I would expect a function called Apply to apply its first argument to
> its second argument. If it did anything radically different I would be
> surprised, and I am.

Yes. ReplaceHead would have been a better name, albeit vastly more
verbose. ;-)

The only reason I can imagine for Apply to even exist is if Mathematica 
has a separate namespace for functions and variables. Then, you'd use 

It does not, AFAIK.

> Then, you'd use 
> Apply[f,g] in the same way that a Lisper would use (apply f g), and for
> the same reason.

Apply[h, expr] is really just shorthand for:

  Replace[expr, {_[args___] -> h[args]}]

Don't forget that symbolic manipulations like this are everything to
Mathematica (there is no run-time, only macroexpansion) so it has many such
functions.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Bruce Stephens
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <87br3s6sqs.fsf@cenderis.demon.co.uk>
Jon Harrop <······@jdh30.plus.com> writes:

> Bruce Stephens wrote:
>> ···············@hotmail.com" <············@gmail.com> writes:
>> Oh, I think I see.  So Apply[f, g[1,2,3]] is kind of a substitution:
>> applying f rather than g to the arguments?  OK, I can see that.
>
> Yes, exactly. You can imagine how useful things like this are in the context
> of manipulating symbolic mathematical expressions.

I can see uses for that kind of thing.  I think I'd use some kind of
syntax for a rewrite rule: something like Map[g[X]=>f[X], g[1,2,3]],
or something.  I haven't really thought about it, though, so maybe
this one kind of rewrite rule is useful enough that it deserves a name
of its own.

>> (I still don't like the capitalised function names and square brackets
>> and things, but those are well known idiosyncrasies of Mathematica;
>> presumably they're no more annoying once you get used to them than
>> parentheses are to people who don't use lisp.)
>
> Yes. That's true of all languages, of course. :-)

Well, all have their peculiarities.  Mathematica seems to have (to my
eyes, anyway) some fairly odd ones.  I used (once upon a time) Maple,
Matlab, Reduce, Macsyma, and their syntaxes look rather more natural
to me.  I guess it's no big deal, but the choices seem odd to me.
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <4307cff3$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Bruce Stephens wrote:
> I can see uses for that kind of thing.  I think I'd use some kind of
> syntax for a rewrite rule: something like Map[g[X]=>f[X], g[1,2,3]],
> or something.  I haven't really thought about it, though, so maybe
> this one kind of rewrite rule is useful enough that it deserves a name
> of its own.

Yes, that's exactly it. Mathematica is designed from the ground up to
manipulate symbolic mathematical expressions and it really excels at it.
Apply is one example. Your example would use either Replace, ReplaceAll or
ReplaceRepeated depending whether you wanted to replace without recursion,
replace all at same depth or replace recursively and repeatedly until the
input stops changing.

For example, the Fibonacci function can be written using rewrite rules in
Mathematica:

In[1]:= ReplaceRepeated[Fib[10], {Fib[n_] :> If[n<3, 1, Fib[n-1]+Fib[n-2]]}]

Out[1]= 55

Better than that, you can see the evaluation by repeating the use of
ReplaceAll yourself:

In[1]:= ReplaceAll[Fib[10], {Fib[n_] :> If[n<3, 1, Fib[n-1]+Fib[n-2]]}]

Out[1]= Fib[8] + Fib[9]

In[1]:= ReplaceAll[%, {Fib[n_] :> If[n<3, 1, Fib[n-1]+Fib[n-2]]}]

Out[1]= Fib[6] + 2 Fib[7] + Fib[8]

As other people have hinted, Mathematica also tries to apply its own
built-in replacement rules. So it will replace 2+3 with 5, for example.

So can we implement a Mathematica-like evaluation strategy in Lisp using
macros? Before I'm going to ask any more questions I'll read some of the
links that people have posted...

>>> (I still don't like the capitalised function names and square brackets
>>> and things, but those are well known idiosyncrasies of Mathematica;
>>> presumably they're no more annoying once you get used to them than
>>> parentheses are to people who don't use lisp.)
>>
>> Yes. That's true of all languages, of course. :-)
> 
> Well, all have their peculiarities.  Mathematica seems to have (to my
> eyes, anyway) some fairly odd ones.  I used (once upon a time) Maple,
> Matlab, Reduce, Macsyma, and their syntaxes look rather more natural
> to me.  I guess it's no big deal, but the choices seem odd to me.

Yes. I'd say that Mathematica's peculiarities are mainly down to its
not-quite LALR(1) grammar. For example, prefix "!" changes meaning if it is
at the beginning of a line. Input is parsed including progressively more
lines until it first makes sense (i.e. parses without error) at which point
it is interpreted. Patterns are lexed rather than parsed.

I must say that all of these "`", "'", ",", ",@", "#" and brackets have me
confused though. ;-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: David Golden
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <nIKNe.4190$R5.578@news.indigo.ie>
Jon Harrop wrote:

> Yes. I would imagine that most Lisp code is run-time and not macros.
> Mathematica code is effectively all Lisp macros.
> 
But do note that lisp "apply" [1] is a lisp function, not a macro, and
therefore belongs in your "run-time" category!

That link [1] will take you to the common lisp specification, by the
way, it might help if you glanced through  it rather than trying to
shoehorn your understanding of lisp into your existing mental framework
for ML.  One thing to bear in  mind - when you see the word "type",
remember that in lisp, it's officially data that have types*, and
thinking "datatype" instead of "type" when you see the word "type" in a
lisp context** may help you if you're coming from ML. 

[1] http://www.lispworks.com/documentation/HyperSpec/Body/f_apply.htm

* From HyperSpec glossary:
object n. 1. any Lisp datum.
type n. 1. a set of objects, usually with common structure, behavior, or
purpose.

** You might still catch lispers saying e.g. 'a variable x is of type
"double-float"', but in understanding lisp compiler behaviour, it helps
to remember that is shorthand for "I've told lisp that x is a variable
that I promise will only ever refer to data of [data]type double-float" 
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <4307c8e8$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
David Golden wrote:
> Jon Harrop wrote:
>> Yes. I would imagine that most Lisp code is run-time and not macros.
>> Mathematica code is effectively all Lisp macros.
>
> But do note that lisp "apply" [1] is a lisp function, not a macro, and
> therefore belongs in your "run-time" category!

Yes, very true. The Lisp equivalent would have to be a macro.

Thanks for the links. I'll check them out.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Wade Humeniuk
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <cJHNe.143962$wr.118542@clgrps12>
Jon Harrop wrote:

> Here's my puny attempt at a Lisp conversion that only works for
> single-argument functions:
> 
>   (defmacro mapply (`f `(g arg))
>     `(,f ,arg))
> 

Your macro is wrong, here is a corrected version and a multi-arg
version

(defmacro mapply (f (g arg))
   (declare (ignore g))
   `(,f ,arg))

(defmacro mapply (f (g &rest args))
   (declare (ignore g))
   `(,f ,@args))

CL-USER 2 > (mapply + (g 3 10))
13

CL-USER 3 >


Wade
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <43075206$0$1288$ed2619ec@ptn-nntp-reader02.plus.net>
Wade Humeniuk wrote:
> Your macro is wrong

Yes indeed. :-)

> (defmacro mapply (f (g &rest args))
>    (declare (ignore g))
>    `(,f ,@args))

Wow, thanks for fixing it! :-)

Ok, so I think this does basically the same thing as Mathematica's Apply. Do
you have to worry about inserting "funcall" in Lisp?

So the syntactic equivalents are:

g[args___]    (g &rest args)
_             g ... (declare (ignore g))
f[args]       `(,f ,@args)

What if you wanted an equivalent of Mathematicas "args__" pattern, that
matches one or more arguments? The OCaml run-time equivalent is "_ :: _ as
args".

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Wade Humeniuk
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <DNONe.231968$on1.61566@clgrps13>
Jon Harrop wrote:

> 
> What if you wanted an equivalent of Mathematicas "args__" pattern, that
> matches one or more arguments? The OCaml run-time equivalent is "_ :: _ as
> args".
> 

I assume you mean there must be at least one arg...

(defmacro mapply (f (g non-optional-arg &rest optional-args))
   (declare (ignore g))
   `(,f ,non-optional-arg ,@optional-args))


CL-USER 1 > (mapply + (g 1 2 3))
6

CL-USER 2 > (mapply + (g 1))
1

CL-USER 3 > (mapply + (g))

Error: syntax error in (MAPPLY + (G)):
The call (MAPPLY + (G)) does not match the definition of MAPPLY in subpattern (G 
NON-OPTIONAL-ARG &REST OPTIONAL-ARGS) : (G)
   1 (abort) Return to level 0.
   2 Return to top loop level 0.

Type :b for backtrace, :c <option number> to proceed,  or :? for other options

CL-USER 4 : 1 > :a

CL-USER 5 >

Wade
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <4307ca5f$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Wade Humeniuk wrote:
> I assume you mean there must be at least one arg...

Yes.

> (defmacro mapply (f (g non-optional-arg &rest optional-args))
>    (declare (ignore g))
>    `(,f ,non-optional-arg ,@optional-args)) 

Ah, I see. Excellent, thanks. :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <430764db$0$17506$ed2e19e4@ptn-nntp-reader04.plus.net>
Wade Humeniuk wrote:
> Your macro is wrong, here is a corrected version and a multi-arg
> version ...

I've just been playing with it a bit and I had overlooked a mistake in your
macro. It should return an unevaluated expression.

Here's another go:

* (defmacro mapply (f (_ &rest args))
  (declare (ignore _))
  `'(,f ,@args))

MAPPLY
* (mapply + (g 3 10))

(+ 3 10)

That's better. Now we need to mimic the Mathematica evaluator using another
macro that reduces expressions like "3+10". I think Lisp's built-in "eval"
will suffice. So we have:

* (eval (mapply + (g 3 10)))

13

There. :-)

I'm still confused about the quotes. Why do I need `' on the last line of
the macro definition?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: M Jared Finder
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <ctCdnWWqyoe3EZreRVn-pA@speakeasy.net>
Jon Harrop wrote:
> Wade Humeniuk wrote:
> 
>>Your macro is wrong, here is a corrected version and a multi-arg
>>version ...
> 
> I've just been playing with it a bit and I had overlooked a mistake in your
> macro. It should return an unevaluated expression.
> 
> Here's another go:
> 
> * (defmacro mapply (f (_ &rest args))
>   (declare (ignore _))
>   `'(,f ,@args))
> 
> MAPPLY
> * (mapply + (g 3 10))
> 
> (+ 3 10)
> 
> That's better. Now we need to mimic the Mathematica evaluator using another
> macro that reduces expressions like "3+10". I think Lisp's built-in "eval"
> will suffice. So we have:

EVAL is not a macro.  EVAL is a function.  Lisp macros allow you to 
write your own Lisp special forms, without having to rewrite the 
function EVAL.

> 
> * (eval (mapply + (g 3 10)))
> 
> 13
> 
> There. :-)
> 
> I'm still confused about the quotes. Why do I need `' on the last line of
> the macro definition?

No, Wade's macro is right; yours is wrong.  A Lisp function gets 
*evaluated* arguments and returns its evaluated value.  If you want 
mapply to work on syntax trees and not on values, pass it a syntax tree:

(defun Fmapply (function tree)
   (append (list function) (rest tree)))

Do you understand the difference between each of these:

(Fmapply '+ '(g 3 10))             ; (+ 3 10)
(eval (Fmapply '+ '(g 3 10)))      ; 13
(eval '(Fmapply '+ '(g 3 10)))     ; (+ 3 10)
(eval (list 'Fmapply '+ '(g 3 10)) ; ERROR!!!

    -- MJF
From: Richard Fateman
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <spRNe.299$Ux3.286@newssvr21.news.prodigy.com>
At the risk of pouring oil on a fire that might, if
left alone, just go out...

(defun mapply(f target) (cons f (cdr target)))

Or possibly

(defun mapply(f target)(meval (cons f (cdr target))))

where meval is whatever is meant by "set the Mathematica engine
going on this argument")


No macros
no runtime vs compile time.
(You don't want to talk about what Mathematica calls "compile")

Imagine a lisp in which quote is either optional or wrong most of
the time.
That is, in an ordinary lisp,

if you type
x
you get something like
Error: Attempt to take the value of the unbound variable `x'.
   [condition type: unbound-variable]

If you type into Mathematica,
x
you get
x

If you want to quote x in Mathematica, you almost can do
it by Hold[x],  but that doesn't really do it. Because to
ever evaluate x you must Release[]  it.
Then it is evaluated "infinitely".


Now extend that to so-called functions.
If you type
(f x)  in lisp but f is undefined, you get an error.
If you type
f[x] in Mathematica under the same circumstances you get
f[x].


Of course f in Mma is not a function, regardless of how many times
Jon says so, or even when the Mma manual says so, at least
in the way there are lisp functions.

f[x] in Mathematica is just an item in its mouth. It has
to chew on it... are there any rules that can match f[x]?
in which case, run the rules. And then simplify the result and
do it again. etc.

It is perfectly reasonable for someone who has learned
Mathematica to be extremely confused about the notion
of quote, evaluation, etc. because there are many items
like Hold, Release, but different. Most are stopgaps introduced
after the initial design, and they don't really work.

Unfortunate, though.


RJF


M Jared Finder wrote:

> Jon Harrop wrote:
> 
>> Wade Humeniuk wrote:
>>
>>> Your macro is wrong, here is a corrected version and a multi-arg
>>> version ...
>>
>>
>> I've just been playing with it a bit and I had overlooked a mistake in 
>> your
>> macro. It should return an unevaluated expression.
>>
>> Here's another go:
>>
>> * (defmacro mapply (f (_ &rest args))
>>   (declare (ignore _))
>>   `'(,f ,@args))
>>
>> MAPPLY
>> * (mapply + (g 3 10))
>>
>> (+ 3 10)
>>
>> That's better. Now we need to mimic the Mathematica evaluator using 
>> another
>> macro that reduces expressions like "3+10". I think Lisp's built-in 
>> "eval"
>> will suffice. So we have:
> 
> 
> EVAL is not a macro.  EVAL is a function.  Lisp macros allow you to 
> write your own Lisp special forms, without having to rewrite the 
> function EVAL.
> 
>>
>> * (eval (mapply + (g 3 10)))
>>
>> 13
>>
>> There. :-)
>>
>> I'm still confused about the quotes. Why do I need `' on the last line of
>> the macro definition?
> 
> 
> No, Wade's macro is right; yours is wrong.  A Lisp function gets 
> *evaluated* arguments and returns its evaluated value.  If you want 
> mapply to work on syntax trees and not on values, pass it a syntax tree:
> 
> (defun Fmapply (function tree)
>   (append (list function) (rest tree)))
> 
> Do you understand the difference between each of these:
> 
> (Fmapply '+ '(g 3 10))             ; (+ 3 10)
> (eval (Fmapply '+ '(g 3 10)))      ; 13
> (eval '(Fmapply '+ '(g 3 10)))     ; (+ 3 10)
> (eval (list 'Fmapply '+ '(g 3 10)) ; ERROR!!!
> 
>    -- MJF
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <4307d892$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
M Jared Finder wrote:
> Jon Harrop wrote:
>> I'm still confused about the quotes. Why do I need `' on the last line of
>> the macro definition?
> 
> No, Wade's macro is right; yours is wrong.

Can you explain how mine is wrong? It seems to work.

> A Lisp function gets 
> *evaluated* arguments and returns its evaluated value. If you want 
> mapply to work on syntax trees and not on values, pass it a syntax tree:

Yes, these functions should be AST -> AST.

> (defun Fmapply (function tree)
>    (append (list function) (rest tree)))
> 
> Do you understand the difference between each of these:
> 
> (Fmapply '+ '(g 3 10))             ; (+ 3 10)

Pass the Fmapply macro the ASTs "+" and "g 3 10". It replaces the function,
returning the AST "3+10".

> (eval (Fmapply '+ '(g 3 10)))      ; 13

Same thing but apply Lisp's eval to the AST, evaluating it to give the Lisp
run-time value "13".

> (eval '(Fmapply '+ '(g 3 10)))     ; (+ 3 10)

Same thing but quoting the already quoted result of Fmapply (call it "x")
and then evaluating it to give "x", i.e. eval removes one of the two levels
of quotation.

> (eval (list 'Fmapply '+ '(g 3 10)) ; ERROR!!!

This seems to hang.

Let me try a simpler example:

  (eval (list '+ '1 '2))

Makes a list of the ASTs "+", "1" and "2" and evaluates it, i.e. evaluating
(+ 1 2).

So:

  (eval (list 'Fmapply ''+ ''(g 3 10))

should evaluate to (Fmapply '+ '(g 3 10)) and then to (+ 3 10) and then to
13. But it doesn't, it also hangs. What am I missing?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: M Jared Finder
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <VaadnSw_OaDIepreRVn-3w@speakeasy.net>
Jon Harrop wrote:
> M Jared Finder wrote:
>>Jon Harrop wrote:
>>
>>>I'm still confused about the quotes. Why do I need `' on the last line of
>>>the macro definition?
>>
>>No, Wade's macro is right; yours is wrong.
> 
> Can you explain how mine is wrong? It seems to work.
> 
>>A Lisp function gets 
>>*evaluated* arguments and returns its evaluated value. If you want 
>>mapply to work on syntax trees and not on values, pass it a syntax tree:
> 
> Yes, these functions should be AST -> AST.

Correct.  And a macro is a function that has that exact purpose.  Your 
macro was wrong because you were fighting with Lisp's evaluation model. 
  Read on...

>>(defun Fmapply (function tree)
>>   (append (list function) (rest tree)))
>>
>>Do you understand the difference between each of these:
>>
>>(Fmapply '+ '(g 3 10))             ; (+ 3 10)
> 
> Pass the Fmapply macro the ASTs "+" and "g 3 10". It replaces the function,
> returning the AST "3+10".

First, Fmapply is *not* a macro.  Lisp macros are defined with defmacro. 
  Fmapply was defined with defun, so it is a normal Lisp function. 
Other than that, you are correct.

>>(eval (Fmapply '+ '(g 3 10)))      ; 13
> 
> Same thing but apply Lisp's eval to the AST, evaluating it to give the Lisp
> run-time value "13".

Yes, the result AST (called an S-exp or a form in Lisp nomenclature),
(+ 3 10), is evaluated, which results in the value 13.

>>(eval '(Fmapply '+ '(g 3 10)))     ; (+ 3 10)
> 
> Same thing but quoting the already quoted result of Fmapply (call it "x")
> and then evaluating it to give "x", i.e. eval removes one of the two levels
> of quotation.

Right result, but not quite for the right reason.  Quote is a special 
operator that returns its parameter *unevaluated*, so

'(Fmapply '+ '(g 3 10))

is the same as

(quote (Fmapply '+ '(g 3 10)))

and they both evaluate to

(Fmapply '+ '(g 3 10))

Note that (Fmapply '+ '(g 3 10)) did not get evaluated by QUOTE; it got 
evaluated by EVAL.

>>(eval (list 'Fmapply '+ '(g 3 10)) ; ERROR!!!
> 
> This seems to hang.

Oops!  I forgot a right parenthesis.  Serves me right for not testing 
out the form.  It should have been (eval (list 'Fmapply '+ '(g 3 10))), 
which errors with the message, "EVAL: undefined function G".

> Let me try a simpler example:
> 
>   (eval (list '+ '1 '2))
> 
> Makes a list of the ASTs "+", "1" and "2" and evaluates it, i.e. evaluating
> (+ 1 2).
> 
> So:
> 
>   (eval (list 'Fmapply ''+ ''(g 3 10))
> 
> should evaluate to (Fmapply '+ '(g 3 10)) and then to (+ 3 10) and then to
> 13. But it doesn't, it also hangs. What am I missing?

A right parenthesis.  Fixing that makes it evaluate to (+ 3 10), not 13.

You are still thinking in terms of Mathematica's evaluation model, which 
seems to be "evaluate recursively until you reach a form that you do not 
know how to handle".  In comparison, Lisp's evaluation model is much 
simpler.  In brief:

* A symbol evaluates to the variable it denotes.  (NIL and T are two 
symbols that evaluate to themselves.)
* A non-symbol atom evaluates to itself.  An atom is a non-list, or the 
empty list, NIL.
* A list is evaluated based on its first member (remember, the empty 
list is the symbol NIL, so we don't need to worry about that case).
   * If the first member is a symbol that names a macro, then that macro 
function is evaluated using the rest of the list as parameters.  The 
result is recursively evaluated.
   * If the first member is a symbol that names a function, then that 
function is evaluated.  Each member of the rest of the list is 
recursively evaluated, left to right, and those values are passed to the 
function as parameters.  The result is returned.
   * If the first member is a lambda expression, then that function is 
evaluated just like if the first member was a symbol that named a function.
   * If the first member is a symbol that names a special operator, then 
the rule for that special operator is used.  There are exactly 25 
special operators in Common Lisp.
* Otherwise, an error is signaled.

A more precise description can be found at 
<http://www.lisp.org/HyperSpec/Body/sec_3-1-2.html>.

Because a macro form gets evaluated at least twice, (mapply + (g 3 10)) 
evaluates to 13.  This is good, as it allows macros to be compiled away.

Your "fix" added an extra quote to the result of mapply, canceling out 
the recursive evaluation.  This is bad for multiple reasons.  It is 
slower, less flexible, and less compatible with the rest of Lisp.  If 
you feel that you must have (mapply + (g 3 10)) evaluate to (+ 3 10) and 
not 13, then you want mapply to be a function, not a macro.[1]

   -- MJF

[1] You could also use MACROEXPAND as your evaluator, but then you would 
not be able to have (+ 3 10) evaluate to 13, since + is a function, not 
a macro.
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <4307eb1f$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
M Jared Finder wrote:
> Jon Harrop wrote:
>> Yes, these functions should be AST -> AST.
> 
> Correct.  And a macro is a function that has that exact purpose.  Your
> macro was wrong because you were fighting with Lisp's evaluation model.

I see.

>>>(Fmapply '+ '(g 3 10))             ; (+ 3 10)
>> 
>> Pass the Fmapply macro the ASTs "+" and "g 3 10". It replaces the
>> function, returning the AST "3+10".
> 
> First, Fmapply is *not* a macro.  Lisp macros are defined with defmacro.
>   Fmapply was defined with defun, so it is a normal Lisp function.
> Other than that, you are correct.

Ok. So functions can handle ASTs and the difference between using a macro
and using a function is when they are applied (macroexpansion time vs run
time).

>>>(eval (Fmapply '+ '(g 3 10)))      ; 13
>> 
>> Same thing but apply Lisp's eval to the AST, evaluating it to give the
>> Lisp run-time value "13".
> 
> Yes, the result AST (called an S-exp or a form in Lisp nomenclature),
> (+ 3 10), is evaluated, which results in the value 13.

Right.

>>>(eval '(Fmapply '+ '(g 3 10)))     ; (+ 3 10)
>> 
>> Same thing but quoting the already quoted result of Fmapply (call it "x")
>> and then evaluating it to give "x", i.e. eval removes one of the two
>> levels of quotation.
> 
> Right result, but not quite for the right reason.  Quote is a special
> operator that returns its parameter *unevaluated*, so
> 
> '(Fmapply '+ '(g 3 10))
> 
> is the same as
> 
> (quote (Fmapply '+ '(g 3 10)))
> 
> and they both evaluate to
> 
> (Fmapply '+ '(g 3 10))
> 
> Note that (Fmapply '+ '(g 3 10)) did not get evaluated by QUOTE; it got
> evaluated by EVAL.

Yes, of course. So the (eval '(...)) is first evaluated to (...).

>>>(eval (list 'Fmapply '+ '(g 3 10)) ; ERROR!!!
>> 
>> This seems to hang.
> 
> Oops!  I forgot a right parenthesis.  Serves me right for not testing
> out the form.

For some reason I didn't think of that. :-)

> It should have been (eval (list 'Fmapply '+ '(g 3 10))), 
> which errors with the message, "EVAL: undefined function G".

Right. Because the arguments to Fmapply should be double quoted.

>> So:
>> 
>>   (eval (list 'Fmapply ''+ ''(g 3 10))
>> 
>> should evaluate to (Fmapply '+ '(g 3 10)) and then to (+ 3 10) and then
>> to 13. But it doesn't, it also hangs. What am I missing?
> 
> A right parenthesis.  Fixing that makes it evaluate to (+ 3 10), not 13.

Yes, of course.

> You are still thinking in terms of Mathematica's evaluation model, which
> seems to be "evaluate recursively until you reach a form that you do not
> know how to handle".

I was aware of that difference. However, I am not sure how the latter could
be implemented using Lisp macros. Of course, it shouldn't be implemented
using Lisp macros...

> In comparison, Lisp's evaluation model is much simpler.

I might try writing a Lisp interpreter in OCaml. Should be a good
lesson. :-)

> Because a macro form gets evaluated at least twice, (mapply + (g 3 10))
> evaluates to 13.  This is good, as it allows macros to be compiled away.

I guessed that macroexpansion wouldn't be very optimised but run-time would
be.

> Your "fix" added an extra quote to the result of mapply, canceling out
> the recursive evaluation.  This is bad for multiple reasons.  It is
> slower, less flexible, and less compatible with the rest of Lisp.  If
> you feel that you must have (mapply + (g 3 10)) evaluate to (+ 3 10) and
> not 13, then you want mapply to be a function, not a macro.[1]

Yes, got it.

> [1] You could also use MACROEXPAND as your evaluator, but then you would
> not be able to have (+ 3 10) evaluate to 13, since + is a function, not
> a macro.

So my use of "eval" was a workaround for that. Yes, I didn't like it when I
wrote it. :-)

Thanks for the help!

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Bruce Stephens
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <871x4owka6.fsf@cenderis.demon.co.uk>
Jon Harrop <······@jdh30.plus.com> writes:

[...]

> I've just been playing with it a bit and I had overlooked a mistake
> in your macro. It should return an unevaluated expression.

It sounds as though you may be introducing low-level details of how
Mathematica's evaluation system works.

> Here's another go:
>
> * (defmacro mapply (f (_ &rest args))
>   (declare (ignore _))
>   `'(,f ,@args))
>
> MAPPLY
> * (mapply + (g 3 10))
>
> (+ 3 10)

Yuck.

> That's better. Now we need to mimic the Mathematica evaluator using
> another macro that reduces expressions like "3+10". I think Lisp's
> built-in "eval" will suffice. So we have:
>
> * (eval (mapply + (g 3 10)))
>
> 13

Yuck again.

I don't think that's at all how lisp macros are intended to work

[...]
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <4307d51b$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Bruce Stephens wrote:
> [...]
> 
>> I've just been playing with it a bit and I had overlooked a mistake
>> in your macro. It should return an unevaluated expression.
> 
> It sounds as though you may be introducing low-level details of how
> Mathematica's evaluation system works.

No, this is really core stuff. In Mathematica, there is only one type and
that type is AST. All values are ASTs and all functions are sets of rules
that pattern match on ASTs and replace them with other ASTs. So there is a
strong resemblance to Lisp's macros.

There are many details to how Mathematica evaluates expressions but I won't
go into those here. As someone who's never used macros in any language, I
just find it cool that you can implement CAS using Lisp macros.

> ...
> I don't think that's at all how lisp macros are intended to work

Agreed. :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Wade Humeniuk
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <IYONe.231971$on1.145662@clgrps13>
Jon Harrop wrote:
> Wade Humeniuk wrote:
> 
>>Your macro is wrong, here is a corrected version and a multi-arg
>>version ...
> 
> 
> I've just been playing with it a bit and I had overlooked a mistake in your
> macro. It should return an unevaluated expression.
> 

Well...  if you want to symbolically expand the macro,
CL already allows for that.

In Lisp this would be better done by

(defmacro mapply (f (_ &rest args))
   (declare (ignore _))
   `(,f ,@args))

[Note: It is only the REPL that actually evals (mapply + (g 1 2 3)).
Within a body of code it would be inserted as (+ 1 2 3) (and then
probably optimized to 6 by the compiler).]

CL-USER 1 > (mapply + (g 1 2 3))
6

CL-USER 2 > (macroexpand '(mapply + (g 1 2 3)))
(+ 1 2 3)
T

CL-USER 3 >

Wade

> Here's another go:
> 
> * (defmacro mapply (f (_ &rest args))
>   (declare (ignore _))
>   `'(,f ,@args))
> 
From: Peter Seibel
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <m2mzncw4zg.fsf@gigamonkeys.com>
Wade Humeniuk <··················@telus.net> writes:

> Jon Harrop wrote:
>> Wade Humeniuk wrote:
>> 
>>> Your macro is wrong, here is a corrected version and a multi-arg
>>> version ...
>>
>> I've just been playing with it a bit and I had overlooked a mistake
>> in your macro. It should return an unevaluated expression.
>> 
>
> Well...  if you want to symbolically expand the macro,
> CL already allows for that.
>
> In Lisp this would be better done by
>
> (defmacro mapply (f (_ &rest args))
>    (declare (ignore _))
>    `(,f ,@args))

Based on what some other posters have explained about Mathematica, it
sounds like this might be more nearly analogous to what Mathematica is
actually doing:

  (defmacro mapply (f form)
    `(,f ,@(rest (macroexpand form))))

-Peter

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Wade Humeniuk
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <oOPNe.209533$tt5.189942@edtnps90>
Peter Seibel wrote:

> 
> Based on what some other posters have explained about Mathematica, it
> sounds like this might be more nearly analogous to what Mathematica is
> actually doing:
> 
>   (defmacro mapply (f form)
>     `(,f ,@(rest (macroexpand form))))
> 

That would make more sense.  (I think I see how it might actually
be used).

Wade
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <4307d365$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Wade Humeniuk wrote:
> Peter Seibel wrote:
>>   (defmacro mapply (f form)
>>     `(,f ,@(rest (macroexpand form))))
> 
> That would make more sense.  (I think I see how it might actually
> be used).

Yes. So, in Mathematica, arguments are evaluated (macroexpand in Lisp) and
then replacement rules are applied. I can see how that Lisp is a better
equivalent.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Jens Axel Søgaard
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <430769f6$0$649$edfadb0f@dread12.news.tele.dk>
Jon Harrop wrote:

> OCaml's grammar contains much of the complexity but also a great deal more
> (such as the typing of polymorphic variants). To put things in perspective,
> my (incomplete) Mathematica grammar was 200LOC and OCaml's grammer is
> 1,500LOC.

Two hundred lines doesn't sound of much, but there is that "incomplete" 
caveat. However, fear not, others have written a complete Mathematica
to Lisp converter:

     <http://http.cs.berkeley.edu/~fateman/papers/lmath.ps>

-- 
Jens Axel Søgaard
From: Jon Harrop
Subject: Re: Lisp syntax vs. Mathematica syntax
Date: 
Message-ID: <4307c851$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Jens Axel Søgaard wrote:
> Two hundred lines doesn't sound of much, but there is that "incomplete"
> caveat.

Yes. IIRC, my main omissions were in the lexer (which has to do quite a lot
of work to understand Mathematica input) and some bits of the grammar (like
";"). I think it was ~80% complete.

> However, fear not, others have written a complete Mathematica 
> to Lisp converter:
> 
>      <http://http.cs.berkeley.edu/~fateman/papers/lmath.ps>

Yes. I think they preferred the OCaml. ;-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Michael Sullivan
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1h1jlk4.7ep0npylu2j2N%use-reply-to@spambegone.null>
Jon Harrop <······@jdh30.plus.com> wrote:

> Ulrich Hobelmann wrote:
> > Jon Harrop wrote:
> >> Joe Marshall wrote:
> >>>      Infix macros are *hard*.
> >> 
> >> Do you mean it is difficult to implement infix in Lisp?
> > 
> > No, others mentioned that *there are* infix parsers (reader macros) for
> > Lisp.  Most people don't use them I guess :D
> 
> Then why are they "hard"? Perhaps I misunderstood Joe. I thought he meant it
> was difficult to implement infix operators in Lisp. Now I'm thinking maybe
> he meant it is easy to implement infix operators but it is then difficult
> to write macros that use infix syntax?
> 
> Presumably that is a Lisp-specific problem because Mathematica has no
> problem using infix notation...

No problem because (like the package referred to by Ulrich), it's
already been written.

I can't speak for Joe, but I think by "hard" he meant "a non-trivial
programming project".  Not AI hard, but not something a competent
programmer can hack together a half-decent solution to in 5-10 minutes.
I can hack together a half-decent solution to parsing basic arithmetic
s-exp math pretty quickly (the reader does most of the work), and I
wouldn't claim to be especially competent in CL.  Infix is a project.
You can't seriously be claiming that implementing Mathematica was not
"hard" for some value of "hard"?

The problem with the hardness doesn't come with plain usage (because,
like Mathematica is already written, the libraries for that are already
written), but with macrology.  If I want to just plop a lot of math
expressions in my lisp program and I'm comfortable with infix, or I want
a user who doesn't understand lisp math to enter standard math
expressions and have them evaluated (like Mathematica), that's a
difficult, but clearly solvable problem, and look, a few people have
written libraries for it.

If I want to programmatically manipulate language math expressions with
macros, I need to understand a lot of details about the infix parsing
every time I write a new macro. 

It is inherently difficult to write macros on expressions with infix
syntax and operator precedence.  Inherently more so than on expressions
with prefix or postfix syntax and no precedence.   That's true no matter
what language you are using.  In most languages, it's not expected that
you can do the kind of macrology lispers expect no matter the input, so
this is not noticeable.  



Michael
From: Peter Seibel
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m2iry1q2j6.fsf@gigamonkeys.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Ulrich Hobelmann wrote:
>> Jon Harrop wrote:
>>> Joe Marshall wrote:
>>>>      Infix macros are *hard*.
>>> 
>>> Do you mean it is difficult to implement infix in Lisp?
>> 
>> No, others mentioned that *there are* infix parsers (reader macros) for
>> Lisp.  Most people don't use them I guess :D
>
> Then why are they "hard"? Perhaps I misunderstood Joe. I thought he meant it
> was difficult to implement infix operators in Lisp. Now I'm thinking maybe
> he meant it is easy to implement infix operators but it is then difficult
> to write macros that use infix syntax?

I think the latter is what he meant; if a good chunk of the syntax of
your language is infix it's harder to write macros because the macros
need to keep track of what kind of operators are being used in the
code being generated and emit differently structured code
depending. Whereas with a regular syntax (such as prefix) it's, well,
more regular.

> Presumably that is a Lisp-specific problem because Mathematica has no
> problem using infix notation...

I'm not sure it's been established that Mathematica has macros that
are similar in power to Lisp's macros. If you want to see what an
honest attempt to combine infix operators with Lisp-style macros looks
like you should check out Dylan or (more recently) the Java Syntactic
Extender[1]. However to understand the comparison you'll, of course,
have to learn something about Common Lisp macros. And if you really
want to go to town, look into Scheme's various macro systems which
offer a slightly different take on the problem.

>>>>      You need to learn the prefix notation *anyway* (because code that
>>>>      operates on code needs to operate at the abstract syntax level,
>>>>      which in lisp is naturally prefix-notated lists).
>>> 
>>> So Lisp is rather tied to the built-in prefix notation.
>> 
>> Not at all.  Implement whatever syntax you want.  If you don't like Lisp
>> syntax at all use a complete, different syntax (and language) like Dylan.
>
> So you disagree with Joe saying that "lisp is naturally prefix-notated
> lists". You believe that prefix/infix/postfix makes no difference in Lisp?

Not that it makes no difference. Rather, on one hand Lisp by default
uses a syntax based on prefix-notated lists. A bunch of other language
features (such as macros) then take advantage of that default
syntax. On the other hand, Lisp is flexible enough that you can
implement a different surface syntax fairly easily if you really want
to. However, you shouldn't be surprised when you find that the other
features of the language, that co-evolved with the standard prefix
syntax are hard to duplicate or translate into your new syntax. You
may want to look up Guy Steele's and Richard Gabriel's paper in the
History of Programming Languages II proceedings where, among other
things, they talk about the tendency of new Lispers to go through a
rite of passage of implementing their own syntax and then abandoning
it when they discover that you lose a lot more than just the surface
syntax when you abandon the prefix notation.

-Peter

[1] <http://people.csail.mit.edu/jrb/jse/index.htm>

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Brian Downing
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <f1pNe.46316$084.44542@attbi_s22>
In article <··············@gigamonkeys.com>,
Peter Seibel  <·····@gigamonkeys.com> wrote:
> > Presumably that is a Lisp-specific problem because Mathematica has no
> > problem using infix notation...
> 
> I'm not sure it's been established that Mathematica has macros that
> are similar in power to Lisp's macros. If you want to see what an
> honest attempt to combine infix operators with Lisp-style macros looks
> like you should check out Dylan or (more recently) the Java Syntactic
> Extender[1]. However to understand the comparison you'll, of course,
> have to learn something about Common Lisp macros. And if you really
> want to go to town, look into Scheme's various macro systems which
> offer a slightly different take on the problem.

To Mathematica's credit its syntax always boils down to its own
funny-looking sexprs:

(2 + {3, 4, 5}) ~Frob~ x   is really   Frob[Plus[2, List[3, 4, 5]], x]

So given that you can programmatically manipulate Mathematica symbolic
expressions, it's probably not a stretch to say that in theory you can
generate arbitrary code like you can with CL macros.

The problem, as mentioned in another branch of the thread here, is that
the Mathematica syntax is incredibly complicated to support some of this
flexibility, and the Mathematica evaluator is so /incredibly/ complex as
to basically prohibit understanding of what's going to happen when
things are evaluated, at least for me.  There's certainly no concept of
"macroexpansion time" and "run time" like there is in CL, and there's no
simple QUOTE operator - things keep evaluating until they stop changing
unless wrapped in special Hold forms that have to stick with it to keep
it from evaluating in the future.

-bcd
-- 
*** Brian Downing <bdowning at lavos dot net> 
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43062035$0$1300$ed2619ec@ptn-nntp-reader02.plus.net>
Brian Downing wrote:
> In article <··············@gigamonkeys.com>,
>> I'm not sure it's been established that Mathematica has macros that
>> are similar in power to Lisp's macros. If you want to see what an
>> honest attempt to combine infix operators with Lisp-style macros looks
>> like you should check out Dylan or (more recently) the Java Syntactic
>> Extender[1]. However to understand the comparison you'll, of course,
>> have to learn something about Common Lisp macros. And if you really
>> want to go to town, look into Scheme's various macro systems which
>> offer a slightly different take on the problem.

Can you give me a reference explaining the difference between Lisp and
Scheme here? I didn't realise this existed...

> To Mathematica's credit its syntax always boils down to it's own
> funny-looking sexprs:
> 
> (2 + {3, 4, 5}) ~Frob~ x   is really   Frob[Plus[2, List[3, 4, 5]], x]

Yes, exactly.

> So given that you can programmatically manipulate Mathematica symbolic
> expressions, it's probably not a stretch to say that in theory you can
> generate arbitrary code like you can with CL macros.

Absolutely. In this respect, I do not think you can get any more general
than Mathematica. The main disadvantage is the performance cost of this
generality. On the other hand, I believe you can replace the lexer
(reader?) in Lisp, which you cannot do in Mathematica.

> The problem, as mentioned in another branch of the thread here, is that
> the Mathematica syntax is incredibly complicated to support some of this
> flexibility, and the Mathematica evaluator is so /incredibly/ complex as
> to basically prohibit understanding of what's going to happen when
> things are evaluated, at least for me.

Not really. It took me 4 days to write a mini-Mathematica implementation in
OCaml. Once you are familiar with Mathematica, it is fairly obvious what's
going on "under the hood".

> There's certainly no concept of "macroexpansion time" and "run time" like
> there is in CL, 

So you cannot use macros to generate code to generate macros to generate
code in Lisp?

> and there's no simple QUOTE operator

What does the quote operator do?

> - things keep evaluating until they stop changing 
> unless wrapped in special Hold forms that have to stick with it to keep
> it from evaluating in the future.

Yes. I thought that was a direct equivalent of QUOTE.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3hddl91yo.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> > There's certainly no concept of "macroexpansion time" and "run time" like
> > there is in CL, 
> 
> So you cannot use macros to generate code to generate macros to generate
> code in Lisp?

Sigh.  Of course you can.  I'm beginning to wonder about your
reasoning abilities.  I mean, how in the world did you "infer" _that_
from what was said?


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Peter Seibel
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m2ek8ppyam.fsf@gigamonkeys.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Brian Downing wrote:
>> In article <··············@gigamonkeys.com>,
>>> I'm not sure it's been established that Mathematica has macros that
>>> are similar in power to Lisp's macros. If you want to see what an
>>> honest attempt to combine infix operators with Lisp-style macros looks
>>> like you should check out Dylan or (more recently) the Java Syntactic
>>> Extender[1]. However to understand the comparison you'll, of course,
>>> have to learn something about Common Lisp macros. And if you really
>>> want to go to town, look into Scheme's various macro systems which
>>> offer a slightly different take on the problem.
>
> Can you give me a reference explaining the difference between Lisp and
> Scheme here? I didn't realise this existed...

Sure. Read about Common Lisp macros. Then read about Scheme
macros. Then note that they are quite different. For extra credit,
note that they also share a deep underlying similarity. I don't mean
to be flip but you're going to have to learn about some of this stuff
yourself to be able to understand it.

>> There's certainly no concept of "macroexpansion time" and "run time" like
>> there is in CL, 
>
> So you cannot use macros to generate code to generate macros to
> generate code in Lisp?

How on earth did you get from what Brian said to that.

>> and there's no simple QUOTE operator
>
> What does the quote operator do?

Hmmm. Do you know about the Hyperspec? The answer to all your
questions (and more) about Common Lisp are contained therein. Or if
you want to learn about Common Lisp in a more expository form, I
can't help but recommend the book listed in my .sig. What kind of
comparative linguist are you if you can't even go to the effort to look up
a few words in the "dictionary" to see what they mean.

-Peter

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Greg Buchholz
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124484271.984092.114800@z14g2000cwz.googlegroups.com>
Peter Seibel wrote:
> Sure. Read about Common Lisp macros. Then read about Scheme
> macros. Then note that they are quite different. For extra credit,
> note that they also share a deep underlying similarity.

    Adding more fuel (and potential confusion) to the fire, André van
Tonder has come up with a simpler Scheme macro system that uses a
hygienic quasiquote and unquote...

http://groups.google.com/group/comp.lang.scheme/msg/510d60aa9ec06f2e


Greg Buchholz
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43062ee3$0$22943$ed2619ec@ptn-nntp-reader01.plus.net>
Peter Seibel wrote:
>>> There's certainly no concept of "macroexpansion time" and "run time"
>>> like there is in CL,
>>
>> So you cannot use macros to generate code to generate macros to
>> generate code in Lisp?
> 
> How on earth did you get from what Brian said to that.

I assume he meant Lisp is evaluated like this:

1. Macros are applied.
2. Resulting macro-free code is evaluated.

In which case (2) cannot generate more macros.

I guess the Mathematica equivalent is that there is only macroexpansion-time
and no run-time. Presumably you can remove the "run-time" from Lisp and
implement execution as a macro that interprets code?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Michael Sullivan
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1h1jtdz.s657f01peceimN%use-reply-to@spambegone.null>
Jon Harrop <······@jdh30.plus.com> wrote:

> Peter Seibel wrote:
> >>> There's certainly no concept of "macroexpansion time" and "run time"
> >>> like there is in CL,
> >>
> >> So you cannot use macros to generate code to generate macros to
> >> generate code in Lisp?

> > How on earth did you get from what Brian said to that.

> I assume he meant Lisp is evaluated like this:
> 
> 1. Macros are applied.
> 2. Resulting macro-free code is evaluated.
> 
> In which case (2) cannot generate more macros.

Well, I can see why you thought that, but it's not accurate.

That there is a distinction between macroexpansion time and run time
doesn't mean that macros can't be expanded at run time, just that they
lose some of the benefits of being expanded at macroexpansion time.
Namely that they expand only once for each time they appear in the
source code text, rather than once for each time they are called during
the run.  

> I guess the Mathematica equivalent is that there is only macroexpansion-time
> and no run-time. Presumably you can remove the "run-time" from Lisp and
> implement execution as a macro that interprets code?

I think the mathematica equivalent is probably that there is only run
time.  Which means that using a macro inside a long loop or recursion
will be much slower, on the order of interpreted v. compiled slower if
the macro does much work.  Obviously in some situations this can't be
avoided in Lisp either (when you don't yet have the code to macroexpand
at compile time).  

Unless code using a macro will be provided by the user or
programatically generated from data, macros can all be (and usually are)
expanded before compile time for a significant run time speed gain.


Michael
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <fyt5ac9q.fsf@ccs.neu.edu>
Jon Harrop <······@jdh30.plus.com> writes:

> Peter Seibel wrote:
>>>> There's certainly no concept of "macroexpansion time" and "run time"
>>>> like there is in CL,
>>>
>>> So you cannot use macros to generate code to generate macros to
>>> generate code in Lisp?
>> 
>> How on earth did you get from what Brian said to that.
>
> I assume he meant Lisp is evaluated like this:
>
> 1. Macros are applied.
> 2. Resulting macro-free code is evaluated.
>
> In which case (2) cannot generate more macros.

It can, but you are assuming that you cannot iterate the process.
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43076668$0$17506$ed2e19e4@ptn-nntp-reader04.plus.net>
Joe Marshall wrote:
>> 1. Macros are applied.
>> 2. Resulting macro-free code is evaluated.
>>
>> In which case (2) cannot generate more macros.
> 
> It can, but you are assuming that you cannot iterate the process.

If (1) produces macro-free code, what would be the point in iterating macro
expansion (there aren't any macros to expand)?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Nathan Baum
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124577092.601063.317960@g14g2000cwa.googlegroups.com>
Jon Harrop wrote:

> If (1) produces macro-free code, what would be the point in iterating macro
> expansion (there aren't any macros to expand)?

The iteration is within (1). It continues expanding until there are no
more macros to expand.

Consider this possible definition of `cond':

(defmacro cond (&rest cases)
  (if cases
      `(if ,(caar cases) (progn ,@(cdar cases))
	(cond ,@(cdr cases)))))

Applying this to "(cond (a b) (c d))" produces "(if a (progn b) (cond
(c d)))". That's still got a macro in it, and so that's expanded to
produce "(if a (progn b) (if c (progn d) (cond)))". Again, that still
contains a macro, and is expanded to produce "(if a (progn b) (if c
(progn d) nil))".

To answer the original question, it's entirely possible for macros to
define new macros. A useful example is from On Lisp: the `defanaph'
macro defines anaphoric versions of existing functions or macros. One
might say

(defanaph aif :calls if :rule :first)

Which would mean that `aif' was an anaphoric macro which called if with
the first argument bound to the variable `it'. Then one can do

(aif (some-expensive-function)
    (do-something-with it))

As shorthand for the longer and potentially more confusing

(let ((xyzzy (some-expensive-function)))
     (if xyzzy
         (do-something-with xyzzy)))
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4307c685$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Nathan Baum wrote:
> To answer the original question, it's entirely possible for macros to
> define new macros...

Excellent. Thanks for the explanation. :-)

So we probably can write something akin to Mathematica's evaluator as Lisp
macros.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mmt48F177hnmU1@individual.net>
Jon Harrop wrote:
> Peter Seibel wrote:
>>>> There's certainly no concept of "macroexpansion time" and "run time"
>>>> like there is in CL,
>>> So you cannot use macros to generate code to generate macros to
>>> generate code in Lisp?
>> How on earth did you get from what Brian said to that.
> 
> I assume he meant Lisp is evaluated like this:
> 
> 1. Macros are applied.
> 2. Resulting macro-free code is evaluated.
> 
> In which case (2) cannot generate more macros.

Almost.  Macros can (and routinely do, in the case of Lisp's builtin 
functions!) call functions.  Of course those functions could have been 
created with the help of macros...

You have to be careful in what order you load those macros and 
functions, especially when you redefine them.

-- 
I believe in Karma.  That means I can do bad things to people
all day long and I assume they deserve it.
	Dogbert
From: Jens Axel Søgaard
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430628f2$0$37087$edfadb0f@dread12.news.tele.dk>
Jon Harrop wrote:
> Brian Downing wrote:

>>> However to understand the comparison you'll, of course,
>>>have to learn something about Common Lisp macros. And if you really
>>>want to go to town, look into Scheme's various macro systems which
>>>offer a slightly different take on the problem.
> 
> Can you give me a reference explaining the difference between Lisp and
> Scheme here? I didn't realise this existed...

The two keywords are "hygienic" and "referential transparency". See
the introduction and papers on:

     <http://schemecookbook.org/Cookbook/GettingStartedMacros>

Then if really want to know more, head over to:

     <http://library.readscheme.org/page3.html>

-- 
Jens Axel Søgaard
From: Jens Axel Søgaard
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43062986$0$37087$edfadb0f@dread12.news.tele.dk>
Jon Harrop wrote:
> Brian Downing wrote:
>>In article <··············@gigamonkeys.com>,

> Not really. It took me 4 days to write a mini-Mathematica implementation in
> OCaml. Once you are familiar with Mathematica, it is fairly obvious what's
> going on "under the hood".
> 
>>There's certainly no concept of "macroexpansion time" and "run time" like
>>there is in CL, 
> 
> So you cannot use macros to generate code to generate macros to generate
> code in Lisp?

Separation of macroexpansion time and run time is essential in order
to allow efficient compilation of the code. If you mix the two, chances
are you end up with an "interpreter only" language.

-- 
Jens Axel Søgaard
From: Peter Seibel
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m264u1pxjb.fsf@gigamonkeys.com>
Jens Axel Søgaard <······@soegaard.net> writes:

> Jon Harrop wrote:
>> Brian Downing wrote:
>>>In article <··············@gigamonkeys.com>,
>
>> Not really. It took me 4 days to write a mini-Mathematica implementation in
>> OCaml. Once you are familiar with Mathematica, it is fairly obvious what's
>> going on "under the hood".
>> 
>>>There's certainly no concept of "macroexpansion time" and "run time" like
>>> there is in CL, 
>> So you cannot use macros to generate code to generate macros to
>> generate
>> code in Lisp?
>
> Separation of macroexpansion time and run time is essential in order
> to allow efficient compilation of the code. If you mix the two, chances
> are you end up with an "interpreter only" language.

Of course in Common Lisp we have access to the macro-expander and the
compiler *at* runtime so the distinction is not as distinct as it
might be. Which is a good thing. (Though it helps to keep them
distinct in one's mind exactly because they can be temporally
interleaved in almost any possible way.)

-Peter

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43062c51$0$22943$ed2619ec@ptn-nntp-reader01.plus.net>
Jens Axel Søgaard wrote:
> Separation of macroexpansion time and run time is essential in order
> to allow efficient compilation of the code. If you mix the two, chances
> are you end up with an "interpreter only" language.

No. That's exactly what I was employed to work on. My final Mathematica
implementation wasn't as theoretically capable as the likes of MetaOCaml,
which will do a much better job when it is completed, albeit on a more
restricted language. I can't explain how it works, of course, but most
compiler-heads can guess. Especially Lisp compiler heads. ;-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Jens Axel Søgaard
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43063381$0$634$edfadb0f@dread12.news.tele.dk>
Jon Harrop wrote:
> Jens Axel Søgaard wrote:
> 
>>Separation of macroexpansion time and run time is essential in order
>>to allow efficient compilation of the code. If you mix the two, chances
>>are you end up with an "interpreter only" language.
> 
> No. That's exactly what I was employed to work on. My final Mathematica
> implementation wasn't as theoretically capable as the likes of MetaOCaml,
> which will do a much better job when it is completed, albeit on a more
> restricted language. I can't explain how it works, of course, but most
> compiler-heads can guess.

I have a feeling you misunderstood the "interpreter only" remark. Some
languages like Python have too many dynamic features, which hinders
*efficient* compilation. Both Common Lisp (and Scheme) have been
designed to allow efficient compilation.

The (language independent) problem of mixing macro expansion time and 
run time is discussed in "Composable and Compilable Macros: You Want it 
When?" by Matthew Flatt available here:

     <http://www.cs.utah.edu/plt/publications/macromod.pdf>

-- 
Jens Axel Søgaard
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43063609$0$22943$ed2619ec@ptn-nntp-reader01.plus.net>
Jens Axel Søgaard wrote:
> I have a feeling you misunderstood the "interpreter only" remark. Some
> languages like Python have too many dynamic features, which hinders
> *efficient* compilation. Both Common Lisp (and Scheme) have been
> designed to allow efficient compilation.
> 
> The (language independent) problem of mixing macro expansion time and
> run time is discussed in "Composable and Compilable Macros: You Want it
> When?" by Matthew Flatt available here:
> 
>      <http://www.cs.utah.edu/plt/publications/macromod.pdf>

He mentions work related to mine in his "Other work" section at the end, but
the thrust of his work is quite different.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mmntbF16kpeaU1@individual.net>
Jon Harrop wrote:
>> and there's no simple QUOTE operator
> 
> What does the quote operator do?

Basically allow you to create symbols (unique values that can be 
compared for equality) and to create lists verbatim, so you don't have 
to create them with (list ...) or cons.

>> - things keep evaluating until they stop changing 
>> unless wrapped in special Hold forms that have to stick with it to keep
>> it from evaluating in the future.
> 
> Yes. I thought that was a direct equivalent of QUOTE.

Hm, dunno.  Quote is for creating Lisp values (lists etc.).  If you 
don't quote, the list you write is evaluated.

In Lisp macros, I think you need quote, so you can create symbols like 
IF, FUNCALL and others (literal function or variable names), because 
without quote they'd be evaluated right away.

-- 
I believe in Karma.  That means I can do bad things to people
all day long and I assume they deserve it.
	Dogbert
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43062cf8$0$22943$ed2619ec@ptn-nntp-reader01.plus.net>
Ulrich Hobelmann wrote:
> Hm, dunno.  Quote is for creating Lisp values (lists etc.).  If you
> don't quote, the list you write is evaluated.
> 
> In Lisp macros, I think you need quote, so you can create symbols like
> IF, FUNCALL and others (literal function or variable names), because
> without quote they'd be evaluated right away.

Right. I think that's exactly the same as "Hold" in Mathematica.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Brian Downing
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <BIRNe.293879$xm3.215241@attbi_s21>
In article <·························@ptn-nntp-reader01.plus.net>,
Jon Harrop  <······@jdh30.plus.com> wrote:
> Right. I think that's exactly the same as "Hold" in Mathematica.

Not really.

(defparameter *test* (quote (+ 2 2)))   ==> *TEST* contains (+ 2 2).

test = Hold[2 + 2]   ==> test contains Hold[2 + 2]

i.e., Hold "sticks" with the expression, whereas QUOTE is just a signal
to the evaluator to pass its contents through unevaluated.  You can't
build something as simple as QUOTE in Mathematica because the evaluator
will keep evaluating until it stops changing - the CL QUOTE would be a
noop in the Mathematica evaluator.

Note also that CL has a very strong sense of object identity.  If I say
(quote (+ 2 2)), I get the exact same (EQ) cons out that I put in.  I
don't think Mathematica has anything like EQ, so I'm not sure how you'd
test to see if the Plus[2, 2] in a hold is the same Plus[2, 2] you put
in earlier.

-bcd
-- 
*** Brian Downing <bdowning at lavos dot net> 
From: Jon Harrop
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <4307f387$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Brian Downing wrote:
> In article <·························@ptn-nntp-reader01.plus.net>,
> Jon Harrop  <······@jdh30.plus.com> wrote:
>> Right. I think that's exactly the same as "Hold" in Mathematica.
> 
> Not really.

Both QUOTE and Hold retain subexpressions in unevaluated form. Perhaps the
similarity ends there...

> (defparameter *test* (quote (+ 2 2)))   ==> *TEST* contains (+ 2 2).
> 
> test = Hold[2 + 2]   ==> test contains Hold[2 + 2]
> 
> i.e., Hold "sticks" with the expression, whereas QUOTE is just a signal
> to the evaluator to pass its contents through unevaluated.  You can't
> build something as simple as QUOTE in Mathematica because the evaluator
> will keep evaluating until it stops changing - the CL QUOTE would be a
> noop in the Mathematica evaluator.

You can see Hold in Mathematica but not QUOTE in Lisp but isn't that more a
function of the pretty printer rather than the evaluation?

> Note also that CL has a very strong sense of object identity.  If I say
> (quote (+ 2 2)), I get the exact same (EQ) cons out that I put in.  I
> don't think Mathematica has anything like EQ, so I'm not sure how you'd
> test to see if the Plus[2, 2] in a hold is the same Plus[2, 2] you put
> in earlier.

In[1]:= Hold[2+2] === Hold[2+2]

Out[1]= True

In[2]:= Hold[a+b] === Hold[b+a]

Out[2]= False

So the Mathematica:

  Release[Hold[expr]]

is equivalent to the Lisp:

  (eval (quote expr))

Beyond that, evaluation is completely different. But just looking at QUOTE
and Hold, there is an uncanny resemblance.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Brian Downing
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <kQSNe.293958$xm3.263215@attbi_s21>
In article <························@ptn-nntp-reader02.plus.net>,
Jon Harrop  <······@jdh30.plus.com> wrote:
> Both QUOTE and Hold retain subexpressions in unevaluated form. Perhaps the
> similarity ends there...

Indeed.

> Brian Downing wrote:
> > (defparameter *test* (quote (+ 2 2)))   ==> *TEST* contains (+ 2 2).
> > 
> > test = Hold[2 + 2]   ==> test contains Hold[2 + 2]
> > 
> > i.e., Hold "sticks" with the expression, whereas QUOTE is just a signal
> > to the evaluator to pass its contents through unevaluated.  You can't
> > build something as simple as QUOTE in Mathematica because the evaluator
> > will keep evaluating until it stops changing - the CL QUOTE would be a
> > noop in the Mathematica evaluator.
> 
> You can see Hold in Mathematica but not QUOTE in Lisp but isn't that more a
> function of the pretty printer rather than the evaluation?

Absolutely not.  QUOTE is /gone/.  It has served its purpose.  Lisp only
evaluates things once.

> > Note also that CL has a very strong sense of object identity.  If I say
> > (quote (+ 2 2)), I get the exact same (EQ) cons out that I put in.  I
> > don't think Mathematica has anything like EQ, so I'm not sure how you'd
> > test to see if the Plus[2, 2] in a hold is the same Plus[2, 2] you put
> > in earlier.
> 
> In[1]:= Hold[2+2] === Hold[2+2]
> 
> Out[1]= True
> 
> In[2]:= Hold[a+b] === Hold[b+a]
> 
> Out[2]= False

CL-USER 14 > (eq (quote (+ 2 2)) (quote (+ 2 2)))
NIL

CL-USER 15 > (defparameter *plus-two-two* (quote (+ 2 2)))
*PLUS-TWO-TWO*

CL-USER 16 > (eq *plus-two-two* *plus-two-two*)
T

-bcd
-- 
*** Brian Downing <bdowning at lavos dot net> 
From: Peter Seibel
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <m2iry0vt8t.fsf@gigamonkeys.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Brian Downing wrote:
>> In article <·························@ptn-nntp-reader01.plus.net>,
>> Jon Harrop  <······@jdh30.plus.com> wrote:
>>> Right. I think that's exactly the same as "Hold" in Mathematica.
>> 
>> Not really.
>
> Both QUOTE and Hold retain subexpressions in unevaluated form. Perhaps the
> similarity ends there...
>
>> (defparameter *test* (quote (+ 2 2)))   ==> *TEST* contains (+ 2 2).
>> 
>> test = Hold[2 + 2]   ==> test contains Hold[2 + 2]
>> 
>> i.e., Hold "sticks" with the expression, whereas QUOTE is just a signal
>> to the evaluator to pass its contents through unevaluated.  You can't
>> build something as simple as QUOTE in Mathematica because the evaluator
>> will keep evaluating until it stops changing - the CL QUOTE would be a
>> noop in the Mathematica evaluator.
>
> You can see Hold in Mathematica but not QUOTE in Lisp but isn't that more a
> function of the pretty printer rather than the evaluation?

No. At least in Lisp case, the QUOTE form was evaluated and it
evaluated to the quoted expression. The pretty printer has no way of
knowing whether a form came from a QUOTE expression or was constructed
in some other way:

  '(+ 1 2) ==> (+ 1 2)

where ==> means "evaluates to". But so does:

  (list '+ 1 2) ==> (+ 1 2)

and also:

  (cons '+ (cons 1 (cons 2 nil))) ==> (+ 1 2)

Given any of those (+ 1 2) lists, you can't tell how it was
produced. And if you then evaluate the list you get 3:

  (eval '(+ 1 2))                        ==> 3
  (eval (list '+ 1 2))                   ==> 3
  (eval (cons '+ (cons 1 (cons 2 nil)))) ==> 3

By contrast (at least according to what others have said in this
thread), in Mathematica Hold "evaluates" to something that carries
it's "heldness" with it until someone explicitly Releases it. Thus you
have:

  Hold[whatever] ==> Hold[whatever]

and assuming for the sake of argument there is an EVAL that does the
same thing as implied by the ==> above:

  EVAL[Hold[whatever]] ==> Hold[whatever]

while

  Release[Hold[whatever]] ==> whatever

but Release (again, as I understand it) is not a general purpose
evaluator--all it does is strip off a level of heldness (or maybe all
levels of heldness?)  Of course I could be all wrong about Mathematica
having never used it; I'm just going by the other descriptions in this
thread.

-Peter

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Jon Harrop
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <4308020f$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Peter Seibel wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> You can see Hold in Mathematica but not QUOTE in Lisp but isn't that more
>> a function of the pretty printer rather than the evaluation?
> 
> No. At least in Lisp case, the QUOTE form was evaluated and it
> evaluated to the quoted expression. The pretty printer has no way of
> knowing whether a form came from a QUOTE expression or was constructed
> in some other way:
> 
>   '(+ 1 2) ==> (+ 1 2)

Right, but Hold[1+2] is Mathematica's equivalent of the quoted expression,
i.e. an expression held in unevaluated form.

> where ==> means "evaluates to". But so does:
> 
>   (list '+ 1 2) ==> (+ 1 2)
>
> and also:
> 
>   (cons '+ (cons 1 (cons 2 nil))) ==> (+ 1 2)

In Mathematica:

In[20]:= Hold[{1, 2}] /. {List -> Plus}

Out[20]= Hold[1 + 2]

The latter "quoted" expression is indistinguishable from the
directly-created one, just like the Lisp.

> Given any of those (+ 1 2) lists, you can't tell how it was
> produced. And if you then evaluate the list you get 3:
> 
>   (eval '(+ 1 2))                        ==> 3
>   (eval (list '+ 1 2))                   ==> 3
>   (eval (cons '+ (cons 1 (cons 2 nil)))) ==> 3

Yes, this is also the same:

In[21]:= Release[Hold[1 + 2]]

Out[21]= 3

In[22]:= Release[Hold[{1, 2}] /. {List -> Plus}]

Out[22]= 3

As Nathan Baum just explained, you can tell between different instantiations
of the same quoted expression in Lisp, whereas you cannot in Mathematica.

> By contrast (at least according to what others have said in this
> thread), in Mathematica Hold "evaluates" to something that carries
> it's "heldness" with it until someone explicitly Releases it. Thus you
> have:
> 
>   Hold[whatever] ==> Hold[whatever]
> 
> and assuming for the sake of argument there is an EVAL that does the
> same thing as implied by the ==> above:
> 
>   EVAL[Hold[whatever]] ==> Hold[whatever]

Yes, I think its "Evaluate" rather than EVAL. In Mathematica,
Hold[Evaluate[expr]] evaluates expr and then holds it, IIRC.

> while
> 
>   Release[Hold[whatever]] ==> whatever
> 
> but Release (again, as I understand it) is not a general purpose
> evaluator--all it does is strip off a level of heldness (or maybe all
> levels of heldness?)

Exactly (the former). This is where the similarity begins to break down.
However, despite the difference in method of evaluation, the results are
the same.

> Of course I could be all wrong about Mathematica 
> having never used it; I'm just going by the other descriptions in this
> thread.

I think you've got the right idea. Mathematica then repeatedly evaluates the
resulting expression until it stops changing. This might seem like a
massive departure from other languages, including Lisp, but actually it is
virtually the same thing.

In Mathematica, if the resulting expression is an integer then it won't
change so evaluation stops.

If the resulting expression is a call to a recursive function then that
function will be invoked and the repeated evaluation by Mathematica simply
corresponds to the recursive function calls in most other languages.

So, term-level interpreters for Mathematica and Lisp are very similar.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Nathan Baum
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <de92mt$18v$1@newsg1.svr.pol.co.uk>
Jon Harrop wrote:
> 
> I think you've got the right idea. Mathematica then repeatedly evaluates the
> resulting expression until it stops changing. This might seem like a
> massive departure from other languages, including Lisp, but actually it is
> virtually the same thing.

Well, no. It's a fundamentally different method of evaluation. Almost 
all generally used languages evaluate each expression just once.

It only _looks_ like Lisp is similar to Mathematica because Lisp 
expressions can evaluate to Lisp expressions. If Lisp was virtually the 
same as Mathematica, programs could only be written to use 
self-evaluating types: there'd be no lists or symbols.

   (print '(list + 1 2))

wouldn't print (LIST + 1 2), nor (+ 1 2), but 3. The initial evaluation 
of QUOTE would produce (LIST + 1 2), which is not a self-evaluating form 
and would therefore be evaluated to produce (+ 1 2), which is not a 
self-evaluating form and would therefore be evaluated to produce 3, 
which is a self-evaluating form and would therefore not be evaluated 
because further evaluation would have no effect.

If you wanted to print (LIST + 1 2), you'd have to do

   (print (hold-form (list + 1 2)))

It might look like you wouldn't need HOLD-FORM and

   (print (car (hold (list + 1 2))))

would suffice. But the car would 'rescue' the list application from the 
hold and therefore cause it to be evaluated.
From: ··············@hotmail.com
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <1124602138.784760.119880@g47g2000cwa.googlegroups.com>
Nathan Baum wrote:
>
> It only _looks_ like Lisp is similar to Mathematica because Lisp
> expressions can evaluate to Lisp expressions. If Lisp was virtually the
> same as Mathematica, programs could only be written to use
> self-evaluating types: there'd be no lists or symbols.
>
>    (print '(list + 1 2))
>
> wouldn't print (LIST + 1 2), nor (+ 1 2), but 3. The initial evaluation
> of QUOTE would produce (LIST + 1 2), which is not a self-evaluating form
> and would therefore be evaluated to produce (+ 1 2), which is not a
> self-evaluating form and would therefore be evaluated to produce 3,
> which is a self-evaluating form and would therefore not be evaluated
> because further evaluation would have no effect.

Careful; remember Common Lisp *has* a variable with the name + (which
evaluates to the previous form evaluated at the REPL). I'm
surprised no one has been burned by this yet in this discussion.
From: Nathan Baum
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <de945g$evk$1@newsg4.svr.pol.co.uk>
··············@hotmail.com wrote:
> Nathan Baum wrote:
> 
>>It only _looks_ like Lisp is similar to Mathematica because Lisp
>>expressions can evaluate to Lisp expressions. If Lisp was virtually the
>>same as Mathematica, programs could only be written to use
>>self-evaluating types: there'd be no lists or symbols.
>>
>>   (print '(list + 1 2))
>>
>>wouldn't print (LIST + 1 2), nor (+ 1 2), but 3. The initial evaluation
>>of QUOTE would produce (LIST + 1 2), which is not a self-evaluating form
>>and would therefore be evaluated to produce (+ 1 2), which is not a
>>self-evaluating form and would therefore be evaluated to produce 3,
>>which is a self-evaluating form and would therefore not be evaluated
>>because further evaluation would have no effect.
> 
> 
> Careful; remember Common Lisp *has* a variable with the name + (which
> evaluates to the previous form evaluated at the REPL). I'm
> surprised no one has been burned by this yet in this discussion.
> 

Hmm. You mean that evaluation would proceed as

   (print '(list + 1 2))
   => (print (list + 1 2))
   => (print (list (print '(list + 1 2)) 1 2))
   => (print (list (print (list + 1 2)) 1 2))
   => (print (list (print (list (print '(list + 1 2)) 1 2)) 1 2))
   => (print (list (print (list (print (list + 1 2)) 1 2)) 1 2))
   => (print (list (print (list (print (list (print '(list + 1 2)) 1 2))
                           1 2)) 1 2))
   => ...

until we got a stack overflow or out of memory error, depending upon how 
the expansion was implemented.

Presumably, a Lisp which used Mathematica-style evaluation would want to 
be a Lisp-1, so this kind of problem could not occur.
From: Nathan Baum
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <de94c2$g3j$1@news6.svr.pol.co.uk>
Nathan Baum wrote:
> Presumably, a Lisp which used Mathematica-style evaluation would want to 
> be a Lisp-1, so this kind of problem could not occur.
Or be a Lisp-2, but not have any standard functions which have the same 
names as standard variables.
From: Nathan Baum
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <de8t75$hk$1@newsm1.svr.pol.co.uk>
Jon Harrop wrote:
>>
>>i.e., Hold "sticks" with the expression, whereas QUOTE is just a signal
>>to the evaluator to pass its contents through unevaluated.  You can't
>>build something as simple as QUOTE in Mathematica because the evaluator
>>will keep evaluating until it stops changing - the CL QUOTE would be a
>>noop in the Mathematica evaluator.
> 
> 
> You can see Hold in Mathematica but not QUOTE in Lisp but isn't that more a
> function of the pretty printer rather than the evaluation?

No, it's a function of it not being there.

A   => <the value of A>
'A  => A
''A => 'A a.k.a. (QUOTE A)

This distinction is very significant, because QUOTE only 'protects' 
against one round of evaluation.

(EVAL A)   => <the result of evaluating the value of A>
(EVAL 'A)  => <the value of A>
(EVAL ''A) => A

In a hypothetical Mathematica-like dialect of Lisp,

A          => <the value of A>
'A         => <the value of A>
''A        => <the value of A>
(EVAL A)   => <the value of A>
(EVAL 'A)  => <the value of A>
(EVAL ''A) => <the value of A>

> 
> Beyond that, evaluation is completely different. But just looking at QUOTE
> and Hold, there is an uncanny resemblance.
> 

If you ignore the name, syntax and behaviour.
From: Jon Harrop
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <4307fbd3$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Nathan Baum wrote:
> This distinction is very significant, because QUOTE only 'protects'
> against one round of evaluation.
> 
> (EVAL A)   => <the result of evaluating the value of A>
> (EVAL 'A)  => <the value of A>
> (EVAL ''A) => A
> 
> In a hypothetical Mathematica-like dialect of Lisp,
> 
> A          => <the value of A>
> 'A         => <the value of A>
> ''A        => <the value of A>
> (EVAL A)   => <the value of A>
> (EVAL 'A)  => <the value of A>
> (EVAL ''A) => <the value of A>

No:

In[15]:= a=3

Out[15]= 3

In[16]:= a

Out[16]= 3

In[17]:= Hold[a]

Out[17]= Hold[a]

In[18]:= Release[Hold[a]]

Out[18]= 3

In[19]:= Release[Hold[Hold[a]]]

Out[19]= Hold[a]

So Hold "protects against one round" of Release.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: ··············@hotmail.com
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <1124600717.113059.254050@g43g2000cwa.googlegroups.com>
Jon Harrop wrote:
> Nathan Baum wrote:
> > This distinction is very significant, because QUOTE only 'protects'
>
....
> In[18]:= Release[Hold[a]]
>
> Out[18]= 3
>
> In[19]:= Release[Hold[Hold[a]]]
>
> Out[19]= Hold[a]
>
> So Hold "protects against one round" of Release.

OK. I can't take any more of this. You have got to be a troll; you have
an incredibly annoying ability to completely miss the point and make
false analogies.

Release[] is absolutely and completely unlike Lisp's eval. It exists to
remove Hold[] and then Mathematica's "infinite" evaluation takes over,
as soon as it has been let out of the Hold[] cage. Only the most
pig-headed dolt would maintain that infinite is anywhere close to 1.
You'd have half a chance of understanding this if you would FORGET
everything you ever learned about Mathematica, or thought exactly the
*opposite* until you were absolutely convinced otherwise.

You have a serious problem. You have obviously spent a lot of energy in
the last few days writing and trying things out in Mathematica and
Lisp, but you have spent very little time internalizing what people
have been telling you. What you might think you have learned CANNOT be
learned in just a few days, no matter how brilliant you are. Instead,
you have built a quick mental translation table which you use to move
back and forth between Mathematica, OCaml, and Lisp. But this is a
quick-and-dirty approach which will ultimately fail you. You will be
forever marked as a dilettante, and believe me, nothing is easier to
spot.

In order to truly understand a fundamentally new language, you must
construct a *new* mental model, or else you will end up speaking in
half-truths and building brittle, strangely constructed programs. That
takes time. Like months. Even years. And requires continuous polishing
and adjustment, because even the most experienced programmer has
forgotten or made false assumptions at some point.

Yes, a quick translation and some quick hacking will let you write
programs in a new language. They might even work for the intended
purpose. But you cannot expect to declaim on comp.lang.lisp about the
meaning of eval without having paid your dues. If KMP says something
about eval, or RJF says something about computer algebra, I pay
attention. Sometimes they make mistakes, or use confusing shorthand, or
neglect to mention background assumptions, or whatever. They're humans
not gods. But I pay *attention*. I assume *I'm* wrong until I have
truly and completely parsed out what they are saying. But when *you*
say something about eval, I have to cringe.

Understanding the possibilities of Lisp macros is quite possibly a
lifetime of work. At a minimum, however, you have to read a book the
equivalent of Graham's On Lisp or read code to the same level, and
actually write substantial programs that use Lisp macros in a wide
range of ways. I've been reading about this stuff and playing with it
for years, and still don't know all the answers.

Some parting hints, because I truly can't take reading any more of this
discussion

* Always be on the lookout for similarities between what you think you
know and what you think you don't know (you've got this down), but BE
AWARE that these will always lead you astray. Remember these points,
because they are the most likely to bite you in the ass once you think
you know what you are doing, but actually still don't. The last
surprise is that there are no more surprises.

* Pause and reflect after an experiment. Don't just run the easy cases
and think you've got the answers. Reformulate your answer, and try *to
prove it wrong.* This takes long periods of concentration, and
reflection, to do correctly. There are NO SHORTCUTS.

* PAY ATTENTION when the experts tell you that you are wrong or
off-base. (Also, beware the "experts" who are just as clueless as you)
That single word "No." is sometimes shorthand for, "Kid, you've got a
long way to go. And I don't even know where to start explaining."
Experts, especially ones spending their free time on Usenet, don't have
time to hold every hand and carefully teach every newcomer. Be grateful
they spend the time to respond at all.

* Realize that people have been programming for longer than you've been
alive (and some of them post), and still, we haven't found the silver
bullet. Most true problems in computing (or any field) are actually
quite hard, and in some sense unfathomable. If you solve something in a
instant, you've probably missed the real problem. At best, you've
probably rediscovered some trick that was published back in
1960-something, and it probably wasn't new then, either. A related
truth: quick benchmarks and comparisons always make gross assumptions,
which make their conclusions usually moot in the real world.

You are obviously a very quick mind. But speed is not enough. You need
to be humble in the face of the monumental scale of the combined
intellect that has gone before you. Fast talking is no substitute for
deep thinking. von Neumann could do new work and make it look easy. He
was almost certainly a hell of a lot smarter than you or I, and the
world was younger then. We're long past the point where some
wet-behind-the-ears Ph.D. in a non-related field has all the answers.
From: Peter Seibel
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <m27jefx6i6.fsf@gigamonkeys.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Nathan Baum wrote:
>> This distinction is very significant, because QUOTE only 'protects'
>> against one round of evaluation.
>> 
>> (EVAL A)   => <the result of evaluating the value of A>
>> (EVAL 'A)  => <the value of A>
>> (EVAL ''A) => A
>> 
>> In a hypothetical Mathematica-like dialect of Lisp,
>> 
>> A          => <the value of A>
>> 'A         => <the value of A>
>> ''A        => <the value of A>
>> (EVAL A)   => <the value of A>
>> (EVAL 'A)  => <the value of A>
>> (EVAL ''A) => <the value of A>
>
> No:
>
> In[15]:= a=3
>
> Out[15]= 3
>
> In[16]:= a
>
> Out[16]= 3
>
> In[17]:= Hold[a]
>
> Out[17]= Hold[a]

This is the difference. If Hold were equivalent to QUOTE then it would be:

  Out[17] = a

-Peter

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Richard Fateman
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <l4UNe.936$GV7.310@newssvr25.news.prodigy.net>
The problem is that Hold and Release are so strange that
Nathan can't quite guess what they do.  Here is what
Mathematica 5.0 does.

do a bunch of assignments...

{a1 = a2; a2 = a3; a3 = a4}

What is the value of a1, a2, a3?  they are all

a4.

That is, one "evaluation"  removes all the indirections.
What someone familiar with lisp would think of as
(quote(quote ....)) gets all the quotes removed.

Jon thinks quote is like Hold.

If we do a bunch of Held assignments...

{b1 = Hold[b2]; b2 = Hold[b3]; b3 = Hold[b4]}

now the value of b1 is Hold[b2].
and the only way to get down to b4  is to do
multiple Releases. Even though there is an Evaluate,
that doesn't work. Evaluate[b1] is Hold[b2].

So Evaluate and Hold are not "inverses".  But
it can't be that Release is the same as lisp's
evaluate, because
Release merely removes [one layer of..] Hold.
Evaluate happens any time something changes in
an expression.  In fact what Nathan (and probably
many others) might think happens is that, consistent
with Mathematica's philosophy, Release does
all the layers.  It doesn't. But this can be done by

FixedPoint[Release,b1]   which results in b4.

Why would Nathan think this FixedPoint calculation
is what Release does? Mathematica ordinarily DOES do a kind of
FixedPoint calculation for its evaluation. However, it
lets Holds stick like raisins in a pudding until
you release them. And then it only releases one
level.

In fact, the strange version of programming
represented by Hold/Release is the tip of the
iceberg. There is also Hold, HoldFirst, ReleaseAll,
Unevaluated, Evaluate.

Note that
Length[Unevaluated[1+2+3+4]]   is 4

but
x= Unevaluated[1+2+3+4]

Length[x]    is 1

If you have followed this, you probably know more about
Hold and Release than 99.9% of Mathematica users.

As I said before, most users have a limited exposure
to the complicated program, and thus only see a small
subset of its peculiarities.

(like the different semantics for Compiled programs,
the wrong scope rules for Block, repaired partly by
introducing Module; peculiar arithmetic, etc.)


Certainly it is possible to do interesting computations
in Mathematica in spite of its flaws.  But it shows
some of the things that can go wrong with
language design by an amateur.

RJF




Jon Harrop wrote:

> Nathan Baum wrote:
> 
>>This distinction is very significant, because QUOTE only 'protects'
>>against one round of evaluation.
>>
>>(EVAL A)   => <the result of evaluating the value of A>
>>(EVAL 'A)  => <the value of A>
>>(EVAL ''A) => A
>>
>>In a hypothetical Mathematica-like dialect of Lisp,
>>
>>A          => <the value of A>
>>'A         => <the value of A>
>>''A        => <the value of A>
>>(EVAL A)   => <the value of A>
>>(EVAL 'A)  => <the value of A>
>>(EVAL ''A) => <the value of A>


In Mathematica 5.0,

> 
> 
> No:
> 
> In[15]:= a=3
> 
> Out[15]= 3
> 
> In[16]:= a
> 
> Out[16]= 3
> 
> In[17]:= Hold[a]
> 
> Out[17]= Hold[a]
> 
> In[18]:= Release[Hold[a]]
> 
> Out[18]= 3
> 
> In[19]:= Release[Hold[Hold[a]]]
> 
> Out[19]= Hold[a]
> 
> So Hold "protects against one round" of Release.
> 
From: Nathan Baum
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <de93no$7ls$1@newsm1.svr.pol.co.uk>
Richard Fateman wrote:
> 
> The problem is that Hold and Release are so strange that
> Nathan can't quite guess what they do.

Luckily, I don't need to guess. I consulted the delightful online 
documentation.

 > In fact what Nathan (and probably
> many others might think happens is that, consistent
> with Mathematica's philosophy, Release does
> all the layers.

No, you've misinterpreted what I was doing. I was showing that if Lisp 
used Mathematica-style evaluation, then QUOTE, not Hold, would have no 
effect.

This was perfectly clear to me, but in retrospect I didn't make it at 
all clear in my post.
From: Joe Marshall
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <ek8m3xht.fsf@ccs.neu.edu>
> Richard Fateman <·······@cs.berkeley.edu> writes:
>>In fact, the strange version of programming
>>represented by Hold/Release is the tip of the
>>iceberg.


> ···@zedat.fu-berlin.de (Stefan Ram) writes:
>   Another evaluation model can be found in TeX, which uses
>   "\expandafter" as some kind of "eval" and "\noexpand" as some
>   kind of "quote". And "\csname" can be used vaguely like
>   "funcall".

To paraphrase Richard Fateman,
  ``Certainly it is possible to do interesting computations
    in TeX in spite of its flaws.  But it shows
    some of the things that can go wrong with
    language design by an amateur.''
From: ··············@hotmail.com
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <1124725497.996753.222680@g43g2000cwa.googlegroups.com>
Joe Marshall wrote:
> > Richard Fateman <·······@cs.berkeley.edu> writes:
> >>In fact, the strange version of programming
> >>represented by Hold/Release is the tip of the
> >>iceberg.
>
>
> > ···@zedat.fu-berlin.de (Stefan Ram) writes:
> >   Another evaluation model can be found in TeX, which uses
> >   "\expandafter" as some kind of "eval" and "\noexpand" as some
> >   kind of "quote". And "\csname" can be used vaguely like
> >   "funcall".
>
> To paraphrase Richard Fateman,
>   ``Certainly it is possible to do interesting computations
>     in TeX in spite of its flaws.  But it shows
>     some of the things that can go wrong with
>     language design by an amateur.''

One must concede, however, that Donald E. Knuth is a very different
sort of amateur than Stephen Wolfram. :-)
From: Joe Marshall
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <ek8l3n5f.fsf@ccs.neu.edu>
···············@hotmail.com" <············@gmail.com> writes:

> Joe Marshall wrote:
>> > Richard Fateman <·······@cs.berkeley.edu> writes:
>> >>In fact, the strange version of programming
>> >>represented by Hold/Release is the tip of the
>> >>iceberg.
>>
>>
>> > ···@zedat.fu-berlin.de (Stefan Ram) writes:
>> >   Another evaluation model can be found in TeX, which uses
>> >   "\expandafter" as some kind of "eval" and "\noexpand" as some
>> >   kind of "quote". And "\csname" can be used vaguely like
>> >   "funcall".
>>
>> To paraphrase Richard Fateman,
>>   ``Certainly it is possible to do interesting computations
>>     in TeX in spite of its flaws.  But it shows
>>     some of the things that can go wrong with
>>     language design by an amateur.''
>
> One must concede, however, that Donald E. Knuth is a very different
> sort of amateur than Stephen Wolfram. :-)

Yes.
From: Nathan Baum
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <de8vb1$vd2$1@newsg3.svr.pol.co.uk>
Jon Harrop wrote:
> Nathan Baum wrote:
> 
>>This distinction is very significant, because QUOTE only 'protects'
>>against one round of evaluation.
>>
>>(EVAL A)   => <the result of evaluating the value of A>
>>(EVAL 'A)  => <the value of A>
>>(EVAL ''A) => A
>>
>>In a hypothetical Mathematica-like dialect of Lisp,
>>
>>A          => <the value of A>
>>'A         => <the value of A>
>>''A        => <the value of A>
>>(EVAL A)   => <the value of A>
>>(EVAL 'A)  => <the value of A>
>>(EVAL ''A) => <the value of A>
> 
> 
> No:
> 

You misunderstand me. I mean that the Mathematica equivalent of 'X, 
which I suppose is just (X), would behave that way.

Mathematica has no _real_ equivalent of Lisp's QUOTE, since Mathematica 
'fully evaluates' every expression: i.e. keeps evaluating it until 
further evaluation will not produce a different value. Hold is possible 
precisely because Hold[x] evaluates to Hold[x].

Another example of the difference between QUOTE and Hold: = holds its 
first argument, so assuming b doesn't have a value,

   a = b

assigns b to a, but Evaluate overrides Hold so

   Evaluate[a] = 42

assigns 42 to b, the result of evaluating a. (Or so the Mathematica 
documentation tells me)

The Lisp function setq quotes its first argument, so

   (setq a 'b)

assigns 'b to a. But eval _doesn't_ override this, and

   (setq (eval a) 42)

is an error.
From: Jon Harrop
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <4308047d$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Nathan Baum wrote:
> You misunderstand me. I mean that the Mathematica equivalent of 'X,
> which I suppose is just (X), would behave that way.

I don't understand. The Mathematica equivalent of 'X is Hold[X]?

> Mathematica has no _real_ equivalent of Lisp's QUOTE, since Mathematica
> 'fully evaluates' every expression: i.e. keeps evaluating it until
> further evaluation will not produce a different value. Hold is possible
> precisely because Hold[x] evalutes to Hold[x].

But "fully evaluating" to the same expression is the same as leaving
unevaluated?

> Another example of the difference between QUOTE and Hold: = holds its
> first argument, so assuming b doesn't have a value,
> 
>    a = b
> 
> assigns b to a, but Evaluate overrides Hold so
> 
>    Evaluate[a] = 42
> 
> assigns 42 to b, the result of evaluating a. (Or so the Matgematica
> documentation tells me)
> 
> The Lisp function setq quotes its first argument, so
> 
>    (setq a 'b)
> 
> assigns 'b to a. But eval _doesn't_ override this, and
> 
>    (setq (eval a) 42)
> 
> is an error.

Yes. The Lisp equivalent is probably:

  (eval (list `setq a 42))

which sets b to 42.

Note that this has nothing to do with the similarity between QUOTE and Hold.
This is entirely to do with evaluation semantics, which are definitely not
the same.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Nathan Baum
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <de92v2$1e5$1@newsg1.svr.pol.co.uk>
Jon Harrop wrote:
> Nathan Baum wrote:
> 
>>You misunderstand me. I mean that the Mathematica equivalent of 'X,
>>which I suppose is just (X), would behave that way.
> 
> 
> I don't understand. The Mathematica equivalent of 'X is Hold[X]?

No. Because Mathematica has a wildly different evaluation model, its 
equivalent of 'X is simply (X) -- Mathematica keeps evaluating until an 
expression doesn't change. 'X changes when it is evaluated, and 
therefore, from Mathematica's point of view, 'X is equivalent to (X).

> 
> Note that this has nothing to do with the similarity between QUOTE and Hold.
> This is entirely to do with evaluation semantics, which are definitely not
> the same.
> 

How do you propose to judge the similarity of QUOTE and Hold _without_ 
considering how they affect the evaluation of expressions?
From: Jon Harrop
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <4308167d$0$97099$ed2619ec@ptn-nntp-reader03.plus.net>
Nathan Baum wrote:
> Jon Harrop wrote:
>> Nathan Baum wrote:
>>>You misunderstand me. I mean that the Mathematica equivalent of 'X,
>>>which I suppose is just (X), would behave that way.
>> 
>> I don't understand. The Mathematica equivalent of 'X is Hold[X]?
> 
> No. Because Mathematica has a wildly different evaluation model, its
> equivalent of 'X is simply (X) -- Mathematica keeps evaluating until an
> expression doesn't change. 'X changes when it is evaluated, and
> therefore, from Mathematica's point of view, 'X is equivalent to (X).

I think this is an important point: all Mathematica values are equivalent to
Lisp s-exprs. So all Mathematica equivalents must be quoted at least once.
So (X) cannot be a Lisp equivalent of anything in Mathematica, AFAIK.

>> Note that this has nothing to do with the similarity between QUOTE and
>> Hold. This is entirely to do with evaluation semantics, which are
>> definitely not the same.
> 
> How do you propose to judge the similarity of QUOTE and Hold _without_
> considering how they effect the evaluation of expressions?

I'm just comparing the effect of eval on QUOTE and Release on Hold. I'm not
trying to say that the subsequent evaluation of the enclosing expression
works in the same way.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Nathan Baum
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <de96ao$3lg$1@newsg1.svr.pol.co.uk>
Jon Harrop wrote:
> Nathan Baum wrote:
> 
>>Jon Harrop wrote:
>>
>>>Nathan Baum wrote:
>>>
>>>>You misunderstand me. I mean that the Mathematica equivalent of 'X,
>>>>which I suppose is just (X), would behave that way.
>>>
>>>I don't understand. The Mathematica equivalent of 'X is Hold[X]?
>>
>>No. Because Mathematica has a wildly different evaluation model, its
>>equivalent of 'X is simply (X) -- Mathematica keeps evaluating until an
>>expression doesn't change. 'X changes when it is evaluated, and
>>therefore, from Mathematica's point of view, 'X is equivalent to (X).
> 
> 
> I think this is an important point: all Mathematica values are equivalent to
> Lisp s-exprs. So all Mathematica equivalents must be quoted at least once.
> So (X) cannot be a Lisp equivalent of anything in Mathematica, AFAIK.

Not all. Literals are, by definition, self-evaluating. ``3'' is the same 
thing in Lisp and Mathematica, and 3 and '3 will evaluate to the same 
thing. 3 and ''3 won't, however.

Are 3 and Hold[3] the same thing.

> 
>>>Note that this has nothing to do with the similarity between QUOTE and
>>>Hold. This is entirely to do with evaluation semantics, which are
>>>definitely not the same.
>>
>>How do you propose to judge the similarity of QUOTE and Hold _without_
>>considering how they affect the evaluation of expressions?
> 
> 
> I'm just comparing the effect of eval on QUOTE and Release on Hold. I'm not
> trying to say that the subsequent evaluation of the enclosing expression
> works in the same way.
> 

Perhaps in terms of the relationship between EVAL and ReleaseHold they 
are similar.

The two do, of course, do quite different things, and it is only when 
used with QUOTE in this limited way that EVAL/QUOTE behaves like 
ReleaseHold/Hold.

EVAL evaluates its argument, whilst ReleaseHold simply makes its 
argument available for evaluation and then Mathematica's normal 
evaluator causes it to be evaluated.

If Lisp had Mathematica's evaluation semantics, EVAL/QUOTE would be 
quite unlike ReleaseHold/Hold: it would be like Peter Seibel's 
implementation of RELEASE and HOLD.

In my view, it is flawed to say that EVAL and ReleaseHold serve similar 
purposes just because the semantics of the language mean that they can 
appear in similar expressions which evaluate to the same thing.

Yes,

   ReleaseHold[Hold[3 + 3]]          ==> 3, and
   (EVAL (QUOTE (+ 3 3))             ==> 3

but

   ReleaseHold[Hold[A + B]]          ==> A + B, whilst
   (EVAL (QUOTE (+ A B))             ==> Error

and also

   Hold[3]                           ==> Hold[3], whilst
   (QUOTE 3)                         ==> 3

and of course

   ReleaseHold[X]                    ==> X, whilst
   (EVAL X)                          ==> Error

additionally,

   ReleaseHold[{2, Hold[1 + 1]}]     ==> {2, 2}, whilst
   (EVAL (VECTOR 2 (QUOTE (+ 1 1)))) ==> #(2 (+ 1 1))
From: Jon Harrop
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <4308636a$0$97099$ed2619ec@ptn-nntp-reader03.plus.net>
Nathan Baum wrote:
> Jon Harrop wrote:
>> I think this is an important point: all Mathematica values are equivalent
>> to Lisp s-exprs. So all Mathematica equivalents must be quoted at least
>> once. So (X) cannot be a Lisp equivalent of anything in Mathematica,
>> AFAIK.
> 
> Not all. Literals are, by definition, self-evaluating. ``3'' is the same
> thing in Lisp and Mathematica, and 3 and '3 will evaluate to the same
> thing. 3 and ''3 won't, however.
> 
> Are 3 and Hold[3] the same thing.

Given that "a" and "Hold[a]" are not the same thing then I'd say "expr" and
"Hold[expr]" were not the same thing in general, so no.

>> I'm just comparing the effect of eval on QUOTE and Release on Hold. I'm
>> not trying to say that the subsequent evaluation of the enclosing
>> expression works in the same way.
> 
> Perhaps in terms of the relationship between EVAL and ReleaseHold they
> are similar.

Yes, that's all I meant.

> The two do, of course, do quite different things, and it is only when
> used with QUOTE in this limited way that EVAL/QUOTE behaves like
> ReleaseHold/Hold.

Yes. It caught my eye because it seems like a remarkably straightforward way
to implement Mathematica's capabilities in Lisp, i.e. using s-exprs.

> EVAL evaluates its argument, whilst ReleaseHold simply makes its
> argument available for evaluation and then Mathematica's normal
> evaluator causes it to be evaluated.

Yes. From the point of view of the user, there isn't much difference.

> If Lisp had Mathematica's evaluation semantics, EVAL/QUOTE would be
> quite unlike ReleaseHold/Hold: it would be like Peter Seibel's
> implementation of RELEASE and HOLD.

Ok.

> In my view, it is flawed to say that EVAL and ReleaseHold serve similar
> purposes just because the semantics of the language mean that they can
> appear in similar expressions which evaluate to the same thing.
> 
> Yes,
> 
>    ReleaseHold[Hold[3 + 3]]          ==> 3, and
>    (EVAL (QUOTE (+ 3 3))             ==> 3
> 
> but
> 
>    ReleaseHold[Hold[A + B]]          ==> A + B, whilst
>    (EVAL (QUOTE (+ A B))             ==> Error
> 
> and also
> 
>    Hold[3]                           ==> Hold[3], whilst
>    (QUOTE 3)                         ==> 3
> 
> and of course
> 
>    ReleaseHold[X]                    ==> X, whilst
>    (EVAL X)                          ==> Error
> 
> additionally,
> 
>    ReleaseHold[{2, Hold[1 + 1]}]     ==> {2, 2}, whilst
>    (EVAL (VECTOR 2 (QUOTE (+ 1 1)))) ==> #(2 (+ 1 1))

But these examples are again missing one level of quotation in Lisp. You're
trying to replace Mathematica's evaluator with Lisp's evaluator. Try
keeping everything quoted instead, and forget about the subsequent
evaluation not working.

The second example should be:

Release[Hold[a+b]] ==> a+b
eval ''(+ a b)     ==> (+ a b)

So the first example should be:

Release[Hold[3+3]] ==> 3+3        ( ==> 6)
eval ''(+ 3 3)     ==> (+ 3 3)

As Mathematica's built-in rules are not the same as Lisp's, we should not
let the Lisp evaluator evaluate anything for us. You are quite right that
Lisp's evaluator works for (+ 3 3) but it should also work for '(+ 3 a 3),
to give '(+ 6 a), for example.

With the equivalence that I'm proposing you would then have:

3                  ==> 3
'3                 ==> 3

Hold[3]            ==> Hold[3]
''3                ==> '3

Release[Hold[X]]   ==> X
(eval ''x)         ==> X

Release[Hold[{2, Hold[1+1]}]] ==> {2, Hold[1+1]}
(eval '`,(list 2 '(+ 1 1)))   ==> (2 (+ 1 1))

However, the following doesn't work because eval applies to the root of the
s-expr whereas Release recursively searches for the equivalent of a second
level of quotation that can be removed:

Release[2 + Hold[3 + 4]] ==> 2 + 3 + 4   ( ==> 9)
(eval '(+ 2 '(+ 3 4)))   ==> Argument Y is not a NUMBER: (+ 3 4).

So a better equivalent to Release would remove the second level of quotation
leaving the first intact. Perhaps we should be mapping eval over the given
s-expr, rather than applying it directly...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Nathan Baum
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <dea6o5$pbg$1@news8.svr.pol.co.uk>
Jon Harrop wrote:
> 
> So the first example should be:
> 
> Release[Hold[3+3]] ==> 3+3        ( ==> 6)
> eval ''(+ 3 3)     ==> (+ 3 3)
> 
> As Mathematica's built-in rules are not the same as Lisp's, we should not
> let the Lisp evaluator evaluate anything for us. You are quite right that
> Lisp's evaluator works for (+ 3 3) but it should also work for '(+ 3 a 3),
> to give '(+ 6 a), for example.
> 
> With the equivalence that I'm proposing you would then have:
> 
> 3                  ==> 3
> '3                 ==> 3
> 
> Hold[3]            ==> Hold[3]
> ''3                ==> '3
> 
> Release[Hold[X]]   ==> X
> (eval ''x)         ==> X
 >
 > Release[Hold[{2, Hold[1+1]}]] ==> {2, Hold[1+1]}
 > (eval '`,(list 2 '(+ 1 1)))   ==> (2 (+ 1 1))

But in that expression, Hold[X] is no longer equivalent to ''X: in the 
outermost Hold it's equivalent to '`,X, and in the innermost 'X.

If we rewrite the example to obey your equivalence:

   (eval ''('2 ''(+ 1 1)))
     ==> ('2 ''(+ 1 1))

which is like Mathematica's

   ReleaseHold[Hold[{Hold[2], Hold[Hold[1 + 1]]}]]
     ==> {Hold[2], Hold[Hold[1 + 1]]}

and not at all like

   ReleaseHold[Hold[{2, Hold[1+1]}]]
     ==> {2, Hold[1+1]}

We see again that the comparison between Release/Hold and EVAL/QUOTE is 
ill-founded. It's like saying multiplication and addition are equivalent 
because 2 + 2 == 2 * 2.
From: Jon Harrop
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <4308a04c$0$97102$ed2619ec@ptn-nntp-reader03.plus.net>
Nathan Baum wrote:
> We see again that the comparison between Release/Hold and EVAL/QUOTE is
> ill-founded. It's like saying multiplication and addition are equivalent
> because 2 + 2 == 2 * 2.

Yes, you are quite right. My examples that produced the right answer did so
for the wrong reason. Oh well. So Lisp's eval and quote wouldn't be very
useful for writing a CAS. What do Lispers use eval and quote for then? :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Richard Fateman
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <0B2Oe.7$A%1.5@newssvr13.news.prodigy.com>
We must find this amusing, but I'm not sure why.
Regarding eval, macros, quote in lisp and in
computer algebra systems..



JH is finally right on something.
Lisp's eval function is hardly ever used explicitly by
anyone writing lisp programs on any topic.

  In the (partial) implementation of mathematica
in lisp on my web site, lisp's eval is used 0 times.


Quote is useful. It delivers constant expressions to
programs. See below.

Regarding macros: In that same collection of files,
defmacro is used mainly as a shorthand for inline coding
of function calls, or to avoid lisp's usual evaluation
mechanism in functions like plot... you don't evaluate
the 1st and 2nd args in  plot(sin(x),x,-Pi, Pi).


  Thinking about defmacro as "creating list
structure" is [although in some view] legitimate, that
is not how list structure needs to be created.  All of
JH's maunderings about macros and how that is what
mathematica does, and therefore that is how lisp does
something, is basically wrong. Mathematica doesn't need
or use macros, and neither does a computer algebra system
written in Lisp, simulating Mathematica.

Lisp programs create list structure by using the program
called cons.  Programs which use cons include list, append,
copy, and the lisp reader program.

typing '(+ a b c)    or equivalently (quote (+ a b c))

instructs the lisp system to
read the symbolic expression and then evaluate it.
During the read process, the symbolic expression
(quote (+ a b c))  is consed together. The evaluation
process, considering the meaning of the quote operation,
simply returns cdr  or "rest"  of that list.  So quote
does not create any list structure. The lisp reader
creates it.  quote merely "does nothing"  and that is
how most lisp programmers think of it, I expect.

Mathematica's choice of semantics, a kind of fixed-point
evaluation applied somewhat haphazardly, is not a requirement
of a computer algebra system. Macsyma has a general
rule of evaluate once, but if that doesn't work evaluate
zero times [an implicit quote in front of an unbound
variable, is an effective way of thinking about it].
Macsyma has an option "infeval" to do something similar
to Mathematica. It is hardly ever used. There are a few
exceptions to the try-to-evaluate-once; e.g. plot program
arguments.

RJF





Jon Harrop wrote:

> Nathan Baum wrote:
> 
>>We see again that the comparison between Release/Hold and EVAL/QUOTE is
>>ill-founded. It's like saying multiplication and addition are equivalent
>>because 2 + 2 == 2 * 2.
> 
> 
> Yes, you are quite right. My examples that produced the right answer did so
> for the wrong reason. Oh well. So Lisp's eval and quote wouldn't be very
> useful for writing a CAS. What do Lispers use eval and quote for then? :-)
> 
From: Jon Harrop
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <4308ff41$0$97138$ed2619ec@ptn-nntp-reader03.plus.net>
Richard Fateman wrote:
> All of
> JH's maunderings about macros and how that is what
> mathematica does, and therefore that is how lisp does
> something,

You are misrepresenting me. I was not trying to infer how Lisp evaluates
code, I was trying to find out if quote and eval could be useful for
writing an embedded Mathematica-like DSL in Lisp. It seems that they are
not, and the next most obvious Lisp implementation is essentially the same
as other languages. So macros are the only remaining point of interest.

> is basically wrong. Mathematica doesn't need 
> or use macros, and neither does a computer algebra system
> written in Lisp, simulating Mathematica.

Not macros, functions mapping ASTs to ASTs. They are the core of Mathematica
and a language that provided this functionality clearly and efficiently
would be of great use in this kind of work.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Peter Seibel
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <m2ll2uvr6y.fsf@gigamonkeys.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Richard Fateman wrote:

>> Mathematica doesn't need or use macros, and neither does a computer
>> algebra system written in Lisp, simulating Mathematica.
>
> Not macros, functions mapping ASTs to ASTs. They are the core of
> Mathematica and a language that provided this functionality clearly
> and efficiently would be of great use in this kind of work.

Then you should check out Lisp. Macros are interesting in this context
insofar as they are an *example* of a part of Lisp that depends on the
ability to write code in Lisp that does AST->AST transformations.[1]

But ultimately macros are for producing Lisp code.  If you just want
to manipulate arbitrary ASTs and/or if you ultimately want to produce
some other form of output (such as code in another language or machine
code or typeset math) then these Lisp features are what you should be
looking at:

  - The built-in symbol data type and packages.

  - The reader.

  - The printer.

  - DESTRUCTURING-BIND.

  - The many built-in functions for manipulating trees built out of
    cons cells.

and ... wait for it ...

  - QUOTE.

-Peter

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Jon Harrop
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <43099974$0$17485$ed2e19e4@ptn-nntp-reader04.plus.net>
Peter Seibel wrote:
> Then you should check out Lisp.

:-)

> Macros are interesting in this context 
> insofar as they are an *example* of a part of Lisp that depends on the
> ability to write code in Lisp that does AST->AST transformations.[1]

But Lisp functions can also do AST -> AST transformations using the built-in
AST type, available via QUOTE but you never want to EVAL them. Is that
about right?

> But ultimately macros are for producing Lisp code.  If you just want
> to manipulate arbitrary ASTs and/or if you ultimately want to produce
> some other form of output (such as code in another language or machine
> code or typeset math) then these Lisp features are what you should be
> looking at:
> ...

I'll check them out, thanks.

> and ... wait for it ...
> 
>   - QUOTE.

In ML you'd define your own AST type and use pattern matching to manipulate
values of that type. Am I right in thinking that, in Lisp, you would use
Quote to build a Lisp-like AST instead of defining your own AST type?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: M Jared Finder
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <c96dncyf-r0yfZTeRVn-uw@speakeasy.net>
Jon Harrop wrote:
> 
> In ML you'd define your own AST type and use pattern matching to manipulate
> values of that type. Am I right in thinking that, in Lisp, you would use
> Quote to build a Lisp-like AST instead of defining your own AST type?

That's right.  Lisp provides lists as the built-in AST type, so anything 
that creates a list can be used to build up an AST.

   -- MJF
From: Peter Seibel
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <m2vf1xub59.fsf@gigamonkeys.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Peter Seibel wrote:
>> Then you should check out Lisp.
>
> :-)
>
>> Macros are interesting in this context 
>> insofar as they are an *example* of a part of Lisp that depends on the
>> ability to write code in Lisp that does AST->AST transformations.[1]
>
> But Lisp functions can also do AST -> AST transformations using the
> built-in AST type, available via QUOTE but you never want to EVAL
> them. Is that about right?

Yes. Though I should point out that the "built in AST type" really
consists primarily of cons cells and symbols and using QUOTEed forms
is only one way to produce them. It just happens to be a convenient
way when you need to embed a literal AST in some code that is going to
manipulate it. Thus if FROB is a function that frobs an AST you can
write:

  (frob '(+ 1 2))

or 

  (frob (cons (intern "+" :common-lisp) (cons 1 (cons 2 nil))))

or any of a number of ways that all produce the list (+ 1 2) to be
passed to FROB.

>> But ultimately macros are for producing Lisp code.  If you just
>> want to manipulate arbitrary ASTs and/or if you ultimately want to
>> produce some other form of output (such as code in another language
>> or machine code or typeset math) then these Lisp features are what
>> you should be looking at: ...
>
> I'll check them out, thanks.
>
>> and ... wait for it ...
>> 
>>   - QUOTE.
>
> In ML you'd define your own AST type and use pattern matching to
> manipulate values of that type. Am I right in thinking that, in
> Lisp, you would use Quote to build a Lisp-like AST instead of
> defining your own AST type?

Well you don't necessarily use only QUOTE as I explained above but all
the facilities for manipulating lists (trees really) built from cons
cells, particularly trees whose leaves are mostly symbols.

-Peter

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Nathan Baum
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <deaig4$tpd$1@newsg2.svr.pol.co.uk>
Jon Harrop wrote:
> Nathan Baum wrote:
> 
>>We see again that the comparison between Release/Hold and EVAL/QUOTE is
>>ill-founded. It's like saying multiplication and addition are equivalent
>>because 2 + 2 == 2 * 2.
> 
> 
> Yes, you are quite right. My examples that produced the right answer did so
> for the wrong reason. Oh well. So Lisp's eval and quote wouldn't be very
> useful for writing a CAS. What do Lispers use eval and quote for then? :-)
> 

Lispers use QUOTE when they want to prevent evaluation of a form. This 
is necessary since Lisp doesn't have distinct syntax for lists and 
forms, which is necessary because Lisp needs to be able to represent 
code as data.

Lispers use EVAL to evaluate data as though it were code. This has 
negative implications for performance, understandability and, sometimes, 
security. Therefore, the use of EVAL should be avoided where possible.

In general, in a situation where you think you need to use EVAL, it 
suffices to use a macro.
From: Nathan Baum
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <de9042$l30$1@news7.svr.pol.co.uk>
Nathan Baum wrote:
> 
> Another example of the difference between QUOTE and Hold: = holds its 
> first argument, so assuming b doesn't have a value,
> 
>   a = b
> 
> assigns b to a, but Evaluate overrides Hold so
> 
>   Evaluate[a] = 42
> 
> assigns 42 to b, the result of evaluating a. (Or so the Mathematica 
> documentation tells me)
> 
> The Lisp function setq quotes its first argument, so
> 
>   (setq a 'b)
> 
> assigns 'b to a. But eval _doesn't_ override this, and
> 
>   (setq (eval a) 42)
> 
> is an error.


Thinking about it, the SETF macro could be used to emulate Mathematica 
here. It selects a setting method based upon the 'expand-time' value of 
the first argument.

   > (defsetf evaluate (form) (value)
       `(if (symbolp ,form)
            (set ,form ,value)
          (error "Couldn't assign ~A to ~A" ,value ,form)))
   EVALUATE
   > (defparameter a 'c)
   A
   > (setf a 'b)
   B
   > A
   B
   > B
   *** The variable B is unbound.
   > (setf (evaluate a) 42)
   42
   > A
   B
   > B
   42
From: Nathan Baum
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <de8uq9$rie$1@newsg1.svr.pol.co.uk>
Jon Harrop wrote:
>>Note also that CL has a very strong sense of object identity.  If I say
>>(quote (+ 2 2)), I get the exact same (EQ) cons out that I put in.  I
>>don't think Mathematica has anything like EQ, so I'm not sure how you'd
>>test to see if the Plus[2, 2] in a hold is the same Plus[2, 2] you put
>>in earlier.
> 
> 
> In[1]:= Hold[2+2] === Hold[2+2]
> 
> Out[1]= True
> 
> In[2]:= Hold[a+b] === Hold[b+a]
> 
> Out[2]= False
> 

The function `===' is not the *object* identity function, as the 
Mathematica documentation makes clear:

   lhs === rhs yields True if the expression lhs is identical to rhs, and
   yields False otherwise.

It appears === is roughly equivalent to CL's equalp.

   > (eq '(+ 2 2) '(+ 2 2))
   NIL
   > (equalp '(+ 2 2) '(+ 2 2))
   T

For the sake of argument:

   > (defmacro hold (form) `(list 'hold ',form))
   HOLD
   > (hold (+ 2 2))
   (HOLD (+ 2 2))
From: Peter Seibel
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <m23bp3x585.fsf@gigamonkeys.com>
Nathan Baum <···········@btinternet.com> writes:

> For the sake of argument:
>
>    > (defmacro hold (form) `(list 'hold ',form))
>    HOLD
>    > (hold (+ 2 2))
>    (HOLD (+ 2 2))

And for the sake of grins:

  (defmacro hold (&whole whole form) (list 'quote whole))
  (defun release (held) (eval (second held)))

In some subtle way I think this very neatly demonstrates the relation
between Hold and QUOTE but I've had my evening glass of wine so I
could be wrong about that. HOLD has to have a special evaluation rule
that keeps putting back the QUOTE that evaluating it takes off. And
then RELEASE takes off a level of heldness and allows the held form to
be evaluated.

-Peter

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Jon Harrop
Subject: Re: Lisp's QUOTE and Mathematica's "Hold"
Date: 
Message-ID: <43080278$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Nathan Baum wrote:
>> In[1]:= Hold[2+2] === Hold[2+2]
>> 
>> Out[1]= True
>> 
>> In[2]:= Hold[a+b] === Hold[b+a]
>> 
>> Out[2]= False
> 
> The function `===' is not the *object* identity function, as the
> Mathematica documentation makes clear:
> 
>    lhs === rhs yields True if the expression lhs is identical to rhs, and
>    yields False otherwise.
> 
> It appears === is roughly equivalent to CL's equalp.
> 
>    > (eq '(+ 2 2) '(+ 2 2))
>    NIL
>    > (equalp '(+ 2 2) '(+ 2 2))
>    T

Yes. Out of interest, this is equivalent to the OCaml:

# `Plus(2, 2) == `Plus(2, 2);;
- : bool = false
# `Plus(2, 2) = `Plus(2, 2);;
- : bool = true

However, physical equality isn't very useful, especially in CAS.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Pascal Bourguignon
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <87d5o97i24.fsf@thalassa.informatimago.com>
Ulrich Hobelmann <···········@web.de> writes:
> Hm, dunno.  Quote is for creating Lisp values (lists etc.).  
> If you don't quote, the list you write is evaluated.

No.

QUOTE doesn't create anything.  It's a special operator that returns
its argument UNEVALUATED.

The one who create the object returned by QUOTE is usually the reader.

When you write: (quote (a b c))
the reader reads a string "(quote (a b c))"
It then parses this string, building the following s-expression:

+-----------------------------------------------+
|                                               |
| +---+---+   +---+---+                         |
| | * | * |-->| * |NIL|                         |
| +---+---+   +---+---+                         |
|   |           |                               |
|   v           v                               |
| +-------+   +---+---+   +---+---+   +---+---+ |
| | QUOTE |   | * | * |-->| * | * |-->| * |NIL| |
| +-------+   +---+---+   +---+---+   +---+---+ |
|               |           |           |       |
|               v           v           v       |
|             +---+       +---+       +---+     |
|             | A |       | B |       | C |     |
|             +---+       +---+       +---+     |
+-----------------------------------------------+

Then it gives it to the EVAL function which "interprets" it. (It may
very well compile it before executing it).

The point is that when EVAL sees the symbol QUOTE in the CAR of the
top-left CONS cell, it just returns the CAR of the CDR of the top-left
CONS cell, that is just the very same:

           +------------------------------------+
           |                                    |
           |  +---+---+   +---+---+   +---+---+ |
           |  | * | * |-->| * | * |-->| * |NIL| |
           |  +---+---+   +---+---+   +---+---+ |
           |    |           |           |       |
           |    v           v           v       |
           |  +---+       +---+       +---+     |
           |  | A |       | B |       | C |     |
           |  +---+       +---+       +---+     |
           +------------------------------------+

which the printer then prints as: (A B C)


You could very well build the object your self, "quote" it and pass
that to EVAL and get back that same object unchanged:
         
     (let* ((object (cons (quote +) (cons 1 (cons 2 nil))))
            (sexpr  (list (quote quote) object)))
        (print object)
        (print sexpr)
        (assert (eq (eval sexpr) object)))

(+ 1 2) 
'(+ 1 2)   ; the printer prints (quote (+ 1 2)) as: '(+ 1 2)
NIL


> In Lisp macros, I think you need quote, so you can create symbols
> like IF, FUNCALL and others (literal function or variable names),
> because without quote they'd be evaluated right away.

You don't need QUOTE to create symbols.

You may create symbols with INTERN, MAKE-SYMBOL, GENSYM, or just let
the reader create them for you.

In any case, CL:IF, CL:FUNCALL and others already exist.  Usually you
just let the reader find them when it parses your code.

That said, if you want to build a list of literal symbols interned by
the reader, then you can indeed use QUOTE:

(let ((ch (CHARACTER "@")))
  (LIST (QUOTE IF) (QUOTE *PRINT-PRETTY*) 
        (LIST (QUOTE FUNCALL) (QUOTE MY-PRINT-PRETTY) ch)))

--> (IF *PRINT-PRETTY* (FUNCALL MY-PRINT-PRETTY #\@))


But this would be silly given the availability of backquote/comma:

(let ((ch (CHARACTER "@")))
  `(if *print-pretty* (funcall my-print-pretty ,ch)))

--> (IF *PRINT-PRETTY* (FUNCALL MY-PRINT-PRETTY #\@))
 
-- 
__Pascal Bourguignon__                     http://www.informatimago.com/

In a World without Walls and Fences, 
who needs Windows and Gates?
From: Jens Axel Søgaard
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430621c8$0$37071$edfadb0f@dread12.news.tele.dk>
Brian Downing wrote:

> The problem, as mentioned in another branch of the thread here, is that
> the Mathematica syntax is incredibly complicated to support some of this
> flexibility, and the Mathematica evaluator is so /incredibly/ complex as
> to basically prohibit understanding of what's going to happen when
> things are evaluated, at least for me.  

Yes - the evaluations rules are scary.

First there is the standard evaluation sequence, which consists
of 12 (twelve) steps:

<http://documents.wolfram.com/v5/TheMathematicaBook/MathematicaReferenceGuide/Evaluation/A.4.1.html>

Of course there is also
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <k6ihai0u.fsf@ccs.neu.edu>
Jens Axel Søgaard <······@soegaard.net> writes:

> Brian Downing wrote:
>
>> The problem, as mentioned in another branch of the thread here, is that
>> the Mathematica syntax is incredibly complicated to support some of this
>> flexibility, and the Mathematica evaluator is so /incredibly/ complex as
>> to basically prohibit understanding of what's going to happen when
>> things are evaluated, at least for me.
>
> Yes - the evaluations rules are scary.
>
> First there is the standard evaluation sequence, which consists
> of 12 (twelve) steps:

First, you admit that you are powerless and that your life has become
unmanageable.... 
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4306344c$0$22943$ed2619ec@ptn-nntp-reader01.plus.net>
Jens Axel Søgaard wrote:
> Brian Downing wrote:
>> The problem, as mentioned in another branch of the thread here, is that
>> the Mathematica syntax is incredibly complicated to support some of this
>> flexibility, and the Mathematica evaluator is so /incredibly/ complex as
>> to basically prohibit understanding of what's going to happen when
>> things are evaluated, at least for me.
> 
> Yes - the evaluations rules are scary.
> 
> First there is the standard evaluation sequence, which consists
> of 12 (twelve) steps:

That isn't significantly more complicated than your standard term-level
interpreter for an FPL, IMHO:

let rec eval state = function
  | EBool b -> VBool b
  | EInt x -> VInt x
  | EFloat x -> VFloat x
  | EVar var -> fetch_var state var
  | ELet (false, s, body, rest) ->
      push s (ref (eval state body)) state (fun state -> eval state rest)
  | ELet (true, s, (EFun _ | EFunction _ as body), rest) ->
      (* Initialise to a dummy value. *)
      let v = ref VDummy in
      (* The body is evaluated  *)
      push s v state (fun state ->
                        v := eval state body;
                        eval state rest)
  | ELet (true, s, _, rest) -> raise RecursiveValue
  | EOp1 (UMinus, e) -> VInt (- int_of_value (eval state e))
  | EOp2 (op, e1, e2) -> func_of_op2 op (eval state e1) (eval state e2)
  | EIf (p, t, f) -> eval state (if bool_of_value (eval state p) then t else
f)
  | EFunction patterns -> VFunction (state_copy state, patterns)
  | EFun (var, body) -> VFun { state = state_copy state; var = var; body =
body }
  | EApply (func, arg) ->
      (* Evaluate the function. *)
      begin match eval state func with
      | VFun f ->
          (* Evaluate the body with the argument bound to its value. *)
          push f.var (ref (eval state arg)) f.state (fun state ->
                                                       eval state f.body)
      | VFunction (environment, patterns) ->
          pattern_match environment (eval state arg) patterns
      | VBuiltin f -> f (eval state arg)
      | v -> rtte (TFun (TAny, TAny)) (type_of_value v)
      end
  | EContainer (c, es) ->
      let es = List.map (eval state) es in
      let () = match c with
          Tuple -> ()
        | Array | List -> check_list es in
      begin match c with
        Tuple -> VTuple es
      | Array -> VArray es
      | List -> VList es
      end

That has 14 patterns. Some of those are represented by built-in
transformation rules in Mathematica (e.g. beta reduction). A term-level
interpreter for Lisp is probably roughly the same complexity (I'd guess).

> Of course there is also

?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: ··············@hotmail.com
Subject: Re: Mathematica evaluation
Date: 
Message-ID: <1124483808.793362.143910@g43g2000cwa.googlegroups.com>
Jon Harrop wrote:
> Jens Axel Søgaard wrote:
> > Brian Downing wrote:
> >> The problem, as mentioned in another branch of the thread here, is that
> >> the Mathematica syntax is incredibly complicated to support some of this
> >> flexibility, and the Mathematica evaluator is so /incredibly/ complex as
> >> to basically prohibit understanding of what's going to happen when
> >> things are evaluated, at least for me.
> >
> > Yes - the evaluations rules are scary.
> >
> > First there is the standard evaluation sequence, which consists
> > of 12 (twelve) steps:
>
> That isn't significantly more complicated than your standard term-level
> interpreter for an FPL, IMHO:
>

Mathematica has a rather ill-defined evaluation mechanism, actually.

The key complication is the highlighted portion below

A.4.5 Global Control of Evaluation

In the evaluation procedure described above, two basic kinds of steps
are involved:
* Iteration: evaluate a particular expression

  >>>> until it no longer changes. <<<<

* Recursion: evaluate subsidiary expressions needed to find the value
of a particular expression.

The concept of "no longer changes" is not a formal concept. Lisp's EVAL
has no corresponding concept. Mathematica uses a number of heuristics
to make their detection of "changes" efficient, which can lead to very
strange behaviors, which can be release- and data-dependent, of the
kind described by Fateman in his classic critique.

This is in addition to the rather vague description of "raw objects" in
the Mathematica book, and the possibly complicated notion of
"applicable transformation rules."

Computer algebra systems solve a very different problem from general
purpose programming languages. The resulting programming environment is
dramatically different from Lisp or Algol, no matter how much Wolfram
makes claims to support various programming paradigms. Much of this is
essential and helpful in doing the kind of expression manipulation done
in computer algebra. Comparing it to Lisp's EVAL function, however, is
misleading.
From: Marco Antoniotti
Subject: Re: Mathematica evaluation
Date: 
Message-ID: <O%pOe.54$DJ5.70033@typhoon.nyu.edu>
··············@hotmail.com wrote:
> Jon Harrop wrote:
> 
>>Jens Axel Søgaard wrote:
>>
>>>Brian Downing wrote:
>>>
>>>>The problem, as mentioned in another branch of the thread here, is that
>>>>the Mathematica syntax is incredibly complicated to support some of this
>>>>flexibility, and the Mathematica evaluator is so /incredibly/ complex as
>>>>to basically prohibit understanding of what's going to happen when
>>>>things are evaluated, at least for me.
>>>
>>>Yes - the evaluations rules are scary.
>>>
>>>First there is the standard evaluation sequence, which consists
>>>of 12 (twelve) steps:
>>
>>That isn't significantly more complicated than your standard term-level
>>interpreter for an FPL, IMHO:
>>
> 
> 
> Mathematica has a rather ill-defined evaluation mechanism, actually.

... and how long did it take for somebody in this thread to state the 
obvious?  :)

Thank you.

Cheers
--
Marco
From: Richard Fateman
Subject: Re: Mathematica evaluation
Date: 
Message-ID: <ty0Pe.3955$Z87.1794@newssvr14.news.prodigy.com>
I thought I would make of note of some observations
by Maxim Rytin in the Mathematica newsgroup.

Unevaluated[2+2]*2      returns 8

but
2*Unevaluated[2+2]      returns  2*Unevaluated[2+2].

Now this, it would seem, is just a bug  (in Mmma version 5.0).
But in fact in the mma newsgroup you see it DEFENDED by
A. Kozlowski, for whom Wolfram can apparently do no wrong.


Search on google groups for Rytin's notes for more gems.  One
nice one is

Module[{f, L},
     L = f[];
      Do[L = f[L, i], {i, 10^4}]
   ] // Timing

Which times a little loop.  However, if you change the name
of the bound variable f   to the name weirdness, the loop
takes 200 times longer.  Oh, and the time to run the loop
becomes quadratic in the number of iterations.

Certainly lisp wouldn't do this.

RJF
From: Andras Simon
Subject: Re: Mathematica evaluation
Date: 
Message-ID: <vcdfysz6y68.fsf@csusza.math.bme.hu>
Richard Fateman <·······@cs.berkeley.edu> writes:

> I thought I would make of note of some observations
> by Maxim Rytin in the Mathematica newsgroup.

Thanks! I love Mathematica bashing!

> 
> Unevaluated[2+2]*2      returns 8
> 
> but
> 2*Unevaluated[2+2]      returns  2*Unevaluated[2+2].
> 
> Now this, it would seem, is just a bug  (in Mmma version 5.0).

It probably is (doesn't happen in 3.0). 

> But in fact in the mma newsgroup you see it DEFENDED by
> A. Kozlowski, for whom Wolfram can apparently do no wrong.

:-)

> 
> 
> Search on google groups for Rytin's notes for more gems.  One
> nice one is
> 
> Module[{f, L},
>      L = f[];
>       Do[L = f[L, i], {i, 10^4}]
>    ] // Timing
> 
> Which times a little loop.  However, if you change the name
> of the bound variable f   to the name weirdness, the loop
> takes 200 times longer.  Oh, and the time to run the loop

Again, 3.0 is free from this particular bug.

> becomes quadratic in the number of iterations.

Mathematica 3.0 for Digital Unix
Copyright 1988-97 Wolfram Research, Inc.
 -- Terminal graphics initialized --

In[1]:= Module[{f, L}, L = f[]; Do[L = f[L, i], {i, 2*10^4}] ] // Timing
Stack overflow: pid 30102, proc MathKernel, addr 0x11fdfffc0, pc
0x1203b9078
Segmentation fault (core dumped)

But it happens very fast :-) 

Anyway, I have the feeling that if someone looked at the sources of
Mathematica (or Maple), he'd die sooner than Wolfram had a chance to
kill him.

> 
> Certainly lisp wouldn't do this.
> 

If an implementation did this sort of thing, people would simply
switch to another.

Andras
From: Richard Fateman
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <4306B29E.6090700@cs.berkeley.edu>
People seem to be taking on faith that Jon is correctly
characterizing Mathematica.  In fact Mathematica is
not even deterministic, and its results depend on
peculiarities like the virtual page shared by a variable
with other variables to determine the execution of rules.

There are so many bad things about Mathematica's
syntax and semantics that most people trying to use
it will only experience a small subset of the possible
flaws.

I view as particularly damning the fact that the
underlying algorithms are typically thousands and thousands
of lines of code in C  (I think "objective C").

I have written a parser for Mathematica (as of version 3.0 or
so) in Common Lisp (google mockmma)
(google "a review of mathematica")
with some details.

Mathematica programmers by and large do not use functions.
They think they are defining functions but they are defining
rules by   f[x_]:= ....;
There are functions rather like lisp's (lambda(...)..)
but they are not typically used.

Mathematica is not a macro language that takes math
and makes an AST.  Its language is not math. As a simple
example, it requires  Sin[x]  instead of  sin x.
yeah, I know that lisp uses (sin x), but no one claims
that is ordinary math notation.

As for whether it makes an AST at all -- well, along the
way the parser does that, but Mathematica doesn't stop
there. As mentioned previously, you need to intersperse
Hold, HoldAll, Release, etc. in key places.
   Writing a REPL in Mathematica might be possible,
but given the botched scope rules, it would probably
need to scrape out the default behavior first.

Look at the mathematica newsgroup if you want to see
people complaining.


I think people are being inexplicably kind to spend
so much time to educate Jon H.    Comp.lang.lisp has a long
tradition of having a short fuse. :)  Maybe JH should get
a troll award.

RJF







Jon Harrop wrote:

> Jens Axel Søgaard wrote:
> 
>>Brian Downing wrote:
>>
>>>The problem, as mentioned in another branch of the thread here, is that
>>>the Mathematica syntax is incredibly complicated to support some of this
>>>flexibility, and the Mathematica evaluator is so /incredibly/ complex as
>>>to basically prohibit understanding of what's going to happen when
<snip>
From: Jon Harrop
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <43074c1b$0$1288$ed2619ec@ptn-nntp-reader02.plus.net>
Richard Fateman wrote:
> People seem to be taking on faith that Jon is correctly
> characterizing Mathematica.  In fact Mathematica is
> not even deterministic, and its results depend on
> peculiarities like the virtual page shared by a variable
> with other variables to determine the execution of rules.

Yes, like most other languages, Mathematica has undefined behaviour under
certain circumstances. The same applies to the order of evaluation of
function arguments in OCaml, for example.

> There are so many bad things about Mathematica's
> syntax and semantics that most people trying to use
> it will only experience a small subset of the possible
> flaws.

That is true for many complicated languages.

> I view as particularly damning the fact that the
> underlying algorithms are typically thousands and thousands
> of lines of code in C  (I think "objective C").

Actually they're not...

> Mathematica programmers by and large do not use functions.
> They think they are defining functions but they are defining
> rules by   f[x_]:= ....;

They are defining a mapping. That is a function.

> There are functions rather like lisp's (lambda(...)..)
> but they are not typically used.

Nonsense. I just leafed through the Mathematica newsgroup and the 14th post
is the first to contain code using anonymous functions. If anything,
anonymous functions are more common in Mathematica code because they are
more succinct (e.g. #& notation) and powerful (e.g. they can recurse).

> Mathematica is not a macro language that takes math
> and makes an AST.  Its language is not math. As a simple
> example, it requires  Sin[x]  instead of  sin x.

You can output in traditional form, for example:

In[1]:= Sin[x] // TraditionalForm

Out[1]//TraditionalForm= sin(x)

> yeah, I know that lisp uses (sin x), but no one claims
> that is ordinary math notation.

Yes, neither Mathematica nor Lisp use conventional mathematical syntax.
Mathematica is a lot closer however. Note that conventional mathematical
syntax is too ambiguous to be useful here. Also, let's not forget that
Mathematica can even typeset maths...

> As for whether it makes an AST at all -- well, along the
> way the parser does that, but Mathematica doesn't stop
> there. As mentioned previously, you need to intersperse
> Hold, HoldAll, Release, etc. in key places.

Just as you use quote in a Lisp macro.

> I think people are being inexplicably kind to spend
> so much time to educate Jon H.    Comp.lang.lisp has a long
> tradition of having a short fuse. :)  Maybe JH should get
> a troll award.

Actually, I am very impressed with the generosity of most of the people in
this group.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: ··············@hotmail.com
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <1124555255.192312.310200@g47g2000cwa.googlegroups.com>
Jon Harrop wrote:
> Richard Fateman wrote:
> > People seem to be taking on faith that Jon is correctly
> > characterizing Mathematica.  In fact Mathematica is
> > not even deterministic, and its results depend on
> > peculiarities like the virtual page shared by a variable
> > with other variables to determine the execution of rules.
>
> Yes, like most other languages, Mathematica has undefined behaviour under
> certain circumstances. The same applies to the order of evaluation of
> function arguments in OCaml, for example.

You miss the point. Wolfram doesn't SPECIFY all the behaviors which are
undefined in Mathematica's evaluation rules. Most of them are buried
where you probably won't run into them, but you aren't told where they
are. That's why RJF used the term "peculiarities". Read his review to
understand more.

Mathematica's evaluation rules are tweaked internally, probably to
avoid disastrous run-time performance. Those tweaks break the formal
model, but not in any clearly specified way. So not only are the rules
complicated and vaguely described, they also aren't the rules.

>
> > Mathematica programmers by and large do not use functions.
> > They think they are defining functions but they are defining
> > rules by   f[x_]:= ....;
>
> They are defining a mapping. That is a function.

In some sense, but what is the domain of that function? Most people
want to define functions from, say, mathematical reals onto reals.
f[x_]:= defines a mapping from Mathematica expressions to other
Mathematica expressions. Those are very different things indeed.

>
> > There are functions rather like lisp's (lambda(...)..)
> > but they are not typically used.
>
> Nonsense. I just leafed through the Mathematica newsgroup and the 14th post
> is the first to contain code using anonymous functions. If anything,

The relevant comparison is the number of mapping rules running around
compared to the number of anonymous function objects which are getting
passed around as data. Not the volume of discussion in a newsgroup.

>
> > As for whether it makes an AST at all -- well, along the
> > way the parser does that, but Mathematica doesn't stop
> > there. As mentioned previously, you need to intersperse
> > Hold, HoldAll, Release, etc. in key places.

> Just as you use quote in a Lisp macro.

NOT AT ALL. Christ, get a clue.
From: Jon Harrop
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <43075ea4$0$1288$ed2619ec@ptn-nntp-reader02.plus.net>
··············@hotmail.com wrote:
>> Yes, like most other languages, Mathematica has undefined behaviour under
>> certain circumstances. The same applies to the order of evaluation of
>> function arguments in OCaml, for example.
> 
> You miss the point. Wolfram doesn't SPECIFY all the behaviors which are
> undefined in Mathematica's evaluation rules. Most of them are buried
> where you probably won't run into them, but you aren't told where they
> are. That's why RJF used the term "peculiarities". Read his review to
> understand more.
> 
> Mathematica's evaluation rules are tweaked internally, probably to
> avoid disastrous run-time performance. Those tweaks break the formal
> model, but not in any clearly specified way. So not only are the rules
> complicated and vaguely described, they also aren't the rules.

Yes. That is also true for most languages. SML is one of the few languages
to have a complete, formal description.

>> > Mathematica programmers by and large do not use functions.
>> > They think they are defining functions but they are defining
>> > rules by   f[x_]:= ....;
>>
>> They are defining a mapping. That is a function.
> 
> In some sense,

No. That is a function.

> but what is the domain of that function? 

expr -> expr

> Most people 
> want to define functions from, say, mathematical reals onto reals.
> f[x_]:= defines a mapping from Mathematica expressions to other
> Mathematica expressions. Those are very different things indeed.

Then use:

  f[x_Real] := ...

>> > There are functions rather like lisp's (lambda(...)..)
>> > but they are not typically used.
>>
>> Nonsense. I just leafed through the Mathematica newsgroup and the 14th
>> post is the first to contain code using anonymous functions. If anything,
> 
> The relevant comparison is the number of mapping rules running around
> compared to the number of anonymous function objects which are getting
> passed around as data. Not the volume of discussion in a newsgroup.

Anonymous functions are commonplace in Mathematica code.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <m3wtmg7d4w.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Richard Fateman wrote:

> > As for whether it makes an AST at all -- well, along the
> > way the parser does that, but Mathematica doesn't stop
> > there. As mentioned previously, you need to intersperse
> > Hold, HoldAll, Release, etc. in key places.
> 
> Just as you use quote in a Lisp macro.

OK, I'm ready to give up on you as hopeless.  But this one is so
egregious, I am still going to take the bait.  No.  This is just
totally wrong.  And the fact that you don't seem to have even a
glimmer of understanding of this, after all everyone has tried to
convey to you about it, indicates just how hopeless you are.


> > I think people are being inexplicably kind to spend
> > so much time to educate Jon H.    Comp.lang.lisp has a long
> > tradition of having a short fuse. :)  Maybe JH should get
> > a troll award.

Sadly, this is probably true...


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Richard Fateman
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <efJNe.1303$L03.380@newssvr27.news.prodigy.net>
Jon Harrop wrote:

>
>>(RJF)I view as particularly damning the fact that the
>>underlying algorithms are typically thousands and thousands
>>of lines of code in C  (I think "objective C").
> 
> 
> (JH) Actually they're not...

Direct quote from Mathematica 5.0 manual
"
Mathematica is one of the more complex software systems ever constructed. Its source code is written 
in a combination of C and Mathematica, and for Version 5, the code for the kernel consists of about 
1.5 million lines of C and 150,000 lines of Mathematica. This corresponds to roughly 50 megabytes of 
data, or some 50,000 printed pages.

The C code in Mathematica is actually written in a custom extension of C which supports certain 
memory management and object‐oriented features. "


> 
> 
>>Mathematica programmers by and large do not use functions.
>>They think they are defining functions but they are defining
>>rules by   f[x_]:= ....;
> 
> 
> They are defining a mapping. That is a function.

define  the rule
  fib[n_]:= If [n<2,1 ,fib[n-1]+fib[n-2]];
You cannot be sure in what order the "function" fib will be called, or how often.

for example,  f[5] -> f[4]+f[3] ->    f[3]+f[2]   + f[2]  ==> simplify to  f[3]+2*f[2] ->  ....

> 
>>There are functions rather like lisp's (lambda(...)..)
>>but they are not typically used.
> 
> 
> Nonsense. I just leafed through the Mathematica newsgroup and the 14th post
> is the first to contain code using anonymous functions. If anything,
> anonymous functions are more common in Mathematica code because they are
> more succinct (e.g. #& notation) and powerful (e.g. they can recurse).

More common in Mathematica than in what? Certainly not in idiomatic
Scheme and Lisp.  I don't know how Mathematica allows recursion in
anonymous functions, but the succinct version of function definition
requires that you name the argument #  , or if several, #1, #2, ...
and the scope and precedence are unclear. Use the notation
Function[{a, b, c}, body]  and you have something similar to
(lambda (a b c) body)    but less succinct.

> 
> 
>>Mathematica is not a macro language that takes math
>>and makes an AST.  Its language is not math. As a simple
>>example, it requires  Sin[x]  instead of  sin x.

Many programs e.g. TeX processors produce OUTPUT in mathematical
typeset form.  TeX's input form and internal form are
not usually claimed to be math.
> 
> 
> You can output in traditional form, for example:
> 
> In[1]:= Sin[x] // TraditionalForm
> 
> Out[1]//TraditionalForm= sin(x)
> 
> 
>>yeah, I know that lisp uses (sin x), but no one claims
>>that is ordinary math notation.
> 
> 
> Yes, neither Mathematica nor Lisp use conventional mathematical syntax.
> Mathematica is a lot closer however.

As someone pointed out,  in Mathematica,
FullForm [a+b c]   is  displayed as
//FullForm=
      Plus[a, Times[b, c]]

I would say Mathematica is hardly different from Lisp
in its internal abstraction, and certainly not "closer to math"
To reinforce this point, Mockmma, a lisp program
can do this...

(display  '(plus (a (times b c)))
-->
     a + b*c

In fact I've assigned this to freshman students using Scheme, including
raised superscripts and fraction bars. Similar programs exist in maxima,
reduce, axiom, jacal, tilu, ...


  Note that conventional mathematical
> syntax is too ambiguous to be useful here. Also, let's not forget that
> Mathematica can even typeset maths...

Ditto for lisp.  See, for example, the sourceforge maxima system
which has an option for TeX output.

> 
> 
>>As for whether it makes an AST at all -- well, along the
>>way the parser does that, but Mathematica doesn't stop
>>there. As mentioned previously, you need to intersperse
>>Hold, HoldAll, Release, etc. in key places.
> 
> 
> Just as you use quote in a Lisp macro.

No, because the alternative to a quote in lisp is "evaluate [once]".
The alternative in Mathematica to Hold is  essentially
"apply rules until no more rules apply." See Mathematica manual A.4.1.

Apologies for the further digression on Mathematica.
From: Jon Harrop
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <430764cd$0$17506$ed2e19e4@ptn-nntp-reader04.plus.net>
Richard Fateman wrote:
> Jon Harrop wrote:
>>>(RJF)I view as particularly damning the fact that the
>>>underlying algorithms are typically thousands and thousands
>>>of lines of code in C  (I think "objective C").
>> 
>> (JH) Actually they're not...
> 
> Direct quote from Mathematica 5.0 manual
> "...
> The C code in Mathematica is actually written in a custom extension of C
> which supports certain memory management and object-oriented features. "

Yes, neither C nor objective C. If you're objective to the verbosity of the
C, then Lisp is not the best alternative...

>>>Mathematica programmers by and large do not use functions.
>>>They think they are defining functions but they are defining
>>>rules by   f[x_]:= ....;
>> 
>> 
>> They are defining a mapping. That is a function.
> 
> define  the rule
>   fib[n_]:= If [n<2,1 ,fib[n-1]+fib[n-2]];
> You cannot be sure in what order the "function" fib will be called,
> or how often.

Exactly the same as OCaml (and most other languages).

>> Nonsense. I just leafed through the Mathematica newsgroup and the 14th
>> post is the first to contain code using anonymous functions. If anything,
>> anonymous functions are more common in Mathematica code because they are
>> more succinct (e.g. #& notation) and powerful (e.g. they can recurse).
> 
> More common in Mathematica than in what?

Other FPLs.

> Certainly not in idiomatic 
> Scheme and Lisp.  I don't know how Mathematica allows recursion in
> anonymous functions,

#0

> but the succinct version of function definition 
> requires that you name the argument #  , or if several, #1, #2, ...
> and the scope and precedence are unclear.

The scope and precedence of what are unclear?

> Use the notation 
> Function[{a, b, c}, body]  and you have something similar to
> (lambda (a b c) body)    but less succinct.

Yes. Using the notation "body&" you have something similar but more concise.

>>>Mathematica is not a macro language that takes math
>>>and makes an AST.  Its language is not math. As a simple
>>>example, it requires  Sin[x]  instead of  sin x.
> 
> Many programs e.g. TeX processors produce OUTPUT in mathematical
> typeset form.  TeX's input form and internal form are
> not usually claimed to be math.

Yes.

> As someone pointed out,  in Mathematica,
> FullForm [a+b c]   is  displayed as
> //FullForm=
>       Plus[a, Times[b, c]]

Yes.

> I would say Mathematica is hardly different from Lisp
> in its internal abstraction, and certainly not "closer to math"

If you mean Mathematica's internal representation is not closer to maths
then yes, but that isn't what we were talking about.

> To reinforce this point, Mockmma, a lisp program
> can do this...
> 
> (display  '(plus (a (times b c)))
> -->
>      a + b*c

Note that "*" isn't conventional mathematical notation (which would be "a +
b c").

>   Note that conventional mathematical
>> syntax is too ambiguous to be useful here. Also, let's not forget that
>> Mathematica can even typeset maths...
> 
> Ditto for lisp.  See, for example, the sourceforge maxima system
> which has an option for TeX output.

Ok. Mathematica typesets maths by default.

>>>As for whether it makes an AST at all -- well, along the
>>>way the parser does that, but Mathematica doesn't stop
>>>there. As mentioned previously, you need to intersperse
>>>Hold, HoldAll, Release, etc. in key places.
>> 
>> Just as you use quote in a Lisp macro.
> 
> No, because the alternative to a quote in lisp is "evaluate [once]".
> The alternative in Mathematica to Hold is  essentially
> "apply rules until no more rules apply." See Mathematica manual A.4.1.

Yes.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Julian Squires
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <60KNe.77611$Ph4.2437222@ursa-nb00s0.nbnet.nb.ca>
On 2005-08-20, Jon Harrop <······@jdh30.plus.com> wrote:
>> Direct quote from Mathematica 5.0 manual
>> "...
>> The C code in Mathematica is actually written in a custom extension of C
>> which supports certain memory management and object-oriented features. "
>
> Yes, neither C nor objective C. If you're objective to the verbosity of the
> C, then Lisp is not the best alternative...

I can't imagine you're saying what I think you're saying, but I'll
correct it, anyway.  Lisp is vastly less verbose than C, by orders of
magnitude.  You can't compare languages at a level of terseness of
syntax and expect that to be the sole determining element of verbosity
at the level of a real program.  Anyone who would save a few keystrokes
per line in exchange for having many thousands more lines to type is
mad.

(I've performed this experiment before, by porting C applications on the
range of 10-20 kloc to Lisp, which usually ends up in the 2-5 kloc
range.  AFAICS, the savings continue to scale up as the LOC count does.)

Cheers.

-- 
Julian Squires
From: Jon Harrop
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <4307c609$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Julian Squires wrote:
> I can't imagine you're saying what I think you're saying, but I'll
> correct it, anyway.  Lisp is vastly less verbose than C, by orders of
> magnitude.  You can't compare languages at a level of terseness of
> syntax and expect that to be the sole determining element of verbosity
> at the level of a real program.  Anyone who would save a few keystrokes
> per line in exchange for having many thousands more lines to type is
> mad.

Yes, absolutely.

> (I've performed this experiment before, by porting C applications on the
> range of 10-20 kloc to Lisp, which usually ends up in the 2-5 kloc
> range.  AFAICS, the savings continue to scale up as the LOC count does.)

In this case, the C-like code is more like 200kLOC, the Lisp is more like
20kLOC and the ML is more like 2kLOC.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Julian Squires
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <UzRNe.77793$Ph4.2440781@ursa-nb00s0.nbnet.nb.ca>
On 2005-08-21, Jon Harrop <······@jdh30.plus.com> wrote:
> In this case, the C-like code is more like 200kLOC, the Lisp is more like
> 20kLOC and the ML is more like 2kLOC.

Well, though I haven't tried comparing the same C programs yet in both
Lisp and ML, from my experience with both I would say that the ML
programs would tend to come out a bit larger, after a certain point.  I
have written many beautifully short programs in OCaml, but I find the
syntactical abstraction capabilities of Lisp are a win in the long run.

Depending (heavily) on the application, I would expect to see 200kloc of
C turn into around 50 kloc of Lisp, and a slightly larger amount of ML.
I guess someday (when I retire?) I'll have to try that experiment and
see.

-- 
Julian Squires
From: mikel
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <0SVNe.23567$ep5.33618968@news.sisna.com>
Julian Squires wrote:
> On 2005-08-21, Jon Harrop <······@jdh30.plus.com> wrote:
> 
>>In this case, the C-like code is more like 200kLOC, the Lisp is more like
>>20kLOC and the ML is more like 2kLOC.
> 
> 
> Well, though I haven't tried comparing the same C programs yet in both
> Lisp and ML, from my experience with both I would say that the ML
> programs would tend to come out a bit larger, after a certain point.  I
> have written many beautifully short programs in OCaml, but I find the
> syntactical abstraction capabilities of Lisp are a win in the long run.
> 
> Depending (heavily) on the application, I would expect to see 200kloc of
> C turn into around 50 kloc of Lisp, and a slightly larger amount of ML.
> I guess someday (when I retire?) I'll have to try that experiment and
> see.


I actually had occasion to write a small but nontrivial production 
application in Lisp and in Ocaml. The Ocaml version was smaller in 
memory and faster in runtime, but the Lisp version was quicker to 
implement and much easier to change and extend. The Lisp version was the 
one that got used.

The two implementations were comparable in lines of code; the Ocaml 
version was probably 8/9 the length of the Lisp version, or thereabouts.

I like both languages a lot, though I like to say I think in Lisp. I 
personally found both implementations quite readable and maintainable, 
but who knows what other programmers would think.

They were on the order of five or ten thousand lines of code, I think.
From: Marco Antoniotti
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <L3qOe.55$DJ5.70033@typhoon.nyu.edu>
Jon Harrop wrote:
> Julian Squires wrote:
> 
>>I can't imagine you're saying what I think you're saying, but I'll
>>correct it, anyway.  Lisp is vastly less verbose than C, by orders of
>>magnitude.  You can't compare languages at a level of terseness of
>>syntax and expect that to be the sole determining element of verbosity
>>at the level of a real program.  Anyone who would save a few keystrokes
>>per line in exchange for having many thousands more lines to type is
>>mad.
> 
> 
> Yes, absolutely.
> 
> 
>>(I've performed this experiment before, by porting C applications on the
>>range of 10-20 kloc to Lisp, which usually ends up in the 2-5 kloc
>>range.  AFAICS, the savings continue to scale up as the LOC count does.)
> 
> 
> In this case, the C-like code is more like 200kLOC, the Lisp is more like
> 20kLOC and the ML is more like 2kLOC.

Only because you refuse to put in all the necessary spaces.

Did I also mention that lines longer that 78 columns are a crime against 
humanity (or at least my aesthetic sense) ?  :)

Cheers
--
Marco
From: Jon Harrop
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <430a39f1$0$97131$ed2619ec@ptn-nntp-reader03.plus.net>
Marco Antoniotti wrote:
>> In this case, the C-like code is more like 200kLOC, the Lisp is more like
>> 20kLOC and the ML is more like 2kLOC.
> 
> Only because you refuse to put in all the necessary spaces.

I don't think it would grow by 10x. :-)

> Did I also mention that lines longer that 78 columns are a crime against
> humanity (or at least my aesthetic sense) ?  :)

I agree.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Olivier Drolet
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <1124747839.684079.120800@g43g2000cwa.googlegroups.com>
>> Did I also mention that lines longer that 78 columns are a crime against
>> humanity (or at least my aesthetic sense) ?  :)

>I agree.

(snip)

Luckily, most Lisp programmers (among others) also agree, as otherwise
all their programs would just be 1 LOC, thus winning out over most
other programming languages in all those programming language
comparisons you hear about on Internet... at least as far as LOC is
concerned.

(Wait! You can almost hear it, in the distance; the sound of goal-posts
shifting...)

}->

Olivier
From: Joe Marshall
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <acja3qno.fsf@ccs.neu.edu>
> Richard Fateman wrote:
>> People seem to be taking on faith that Jon is correctly
>> characterizing Mathematica.

For the sake of argument, yes.  I'm assuming he's correctly reporting
his personal experience interacting with Mathematica.

However, I have read your critique on Mathematica and have discussed
it with friends that are in a position to know about it.  I don't
assume that Jon has the language design and semantics experience to
see the obvious flaws.  I expect he has unconsciously developed the
panglossian view that Mathematica is the best of all possible computer
algebra systems and that whatever flaws it has are justifiable because
all software has bugs and idiosyncrasies.

>> In fact Mathematica is not even deterministic, and its results
>> depend on peculiarities like the virtual page shared by a variable
>> with other variables to determine the execution of rules.

> Jon Harrop <······@jdh30.plus.com> writes:
>
> Yes, like most other languages, Mathematica has undefined behaviour under
> certain circumstances. The same applies to the order of evaluation of
> function arguments in OCaml, for example.

These are *completely* different kinds of problems.  In OCaml
(presumably), the order of evaluation is not specified, but the
behavior is not undefined!  OCaml is call-by-value, so the evaluation
*must* occur before the function is called.  I haven't studied OCaml,
but I assume that these properties hold:

   Argument evaluation must be consistent with *some* order.

   Order of argument evaluation must not be detectable by pure
   functional programs.

   Writing code that depends on order of evaluation is strongly
   discouraged.

Even in languages that have well-defined order of evaluation, it is
considered poor style to use that order to implicitly sequence
operations.  

It is easy and straightforward to avoid order of evaluation
dependencies.


Does one have control over what virtual pages a variable occupies in
Mathematica?  Can one control grouping so that two variables are
required or prohibited from sharing?  Is it easy and straightforward
to avoid unexpected dependencies?

>> There are so many bad things about Mathematica's
>> syntax and semantics that most people trying to use
>> it will only experience a small subset of the possible
>> flaws.
>
> That is true for many complicated languages.

Unfortunately so, but it need not be the case.  There are certain
principles of language design that were developed *years* ago (some of
the principles are *thousands* of years old) that have withstood the
test of time.  The kinds of flaws that come from ignorance or
deliberate rejection of these principles are qualitatively different
from the ones that come from the unavoidable (but correctable) bugs in
the implementation.  Mathematica's problems are of the first kind.

>> I view as particularly damning the fact that the
>> underlying algorithms are typically thousands and thousands
>> of lines of code in C  (I think "objective C").
>
> Actually they're not...
>
>> Mathematica programmers by and large do not use functions.
>> They think they are defining functions but they are defining
>> rules by   f[x_]:= ....;
>
> They are defining a mapping. That is a function.

There are BIG differences between mathematical functions and rewrite
rules.  Even a rewrite rule that defines a function may not be the
function you think it is or expect.  Lisp hackers, computer language
theorists, and especially computer language and computer algebra
theorists know that function definition is one of the trickiest parts
of semantics.

>> As for whether it makes an AST at all -- well, along the
>> way the parser does that, but Mathematica doesn't stop
>> there. As mentioned previously, you need to intersperse
>> Hold, HoldAll, Release, etc. in key places.
>
> Just as you use quote in a Lisp macro.

Here is a prime example.

Mathematica `evaluates' an expression by iteratively applying rewrite
rules until it reaches a fixed point (until `nothing changes').
This sounds like it may be a reasonable, if unorthodox, choice for
evaluation semantics, and newbies often think that Lisp or Scheme
works this way.

Lisp's `eval' routine takes an expression as list structure and
determines what value that expression means.  It *may* be the case
that the value is itself a list structure that could be interpreted as
an expression.  This is of no consequence to eval.  The result of
evaluation is not itself iteratively evaluated.

Both Lisp and Mathematica have a mechanism for quoting, that is, for
embedding structure that superficially appears to be an expression.
In Lisp, this is QUOTE, and its effect is simple:  the value of a
QUOTE expression is the object quoted.

In Mathematica, however, this would not work.  The value of a HOLD
expression cannot be the object that is held because *that* object
might be an expression (and thus get fed back into the Mathematica
evaluator).  Instead, the value of a HOLD expression is the HOLD
expression itself.

There is a subtle difference:  Lisp can embed *any* object in code
through use of QUOTE.  Mathematica cannot embed objects in code, but
it can almost fake it because HOLD expressions end up being constant.
You may think this is a difference that makes no difference, but it
shows up when you write higher-order macros.  A higher-order macro has
two stages of parameterization:  the first occurs when you use the
higher-order macro to define another macro, the second occurs when you
use the macro that is defined.  At which stage do you expand the
parameters that are introduced at stage 1?  For some macros, those
parameters ought to be expanded at stage 1, but for others it might be
stage 2 (it could be both!).  In Lisp, it is relatively
straightforward to control the stage at which a macro parameter is
expanded through use of the appropriate level of nesting of QUOTED
structure.  At each stage of expansion, QUOTED list structure becomes
a literal expression for the next stage.  In Mathematica, however, a
HOLD expression remains unchanged until you RELEASE it, so you can
*defer* evaluation, but you can't easily *undefer* it.

(In Lisp, QUOTE and EVAL respectively take you one step up and down
the syntactic reflect/reify tower.  At each meta-level, additional
quotes take you further up, and additional EVALs take you further
down.  In Mathematica, HOLD takes you a step up, but RELEASE takes you
*all the way down* to the bottom level.)

This is an example of being ignorant about language design principles.
If you have reflection in a language, you should make sure that each
reflective operator has a corresponding reification operator that
*exactly* undoes that reflection.  If you don't, it will be either
painful or impossible to *use* the reflection in non-trivial ways.
Mathematica chose to ignore this principle, and guess what happens.

>> I think people are being inexplicably kind to spend
>> so much time to educate Jon H.    Comp.lang.lisp has a long
>> tradition of having a short fuse. :)  Maybe JH should get
>> a troll award.

I guess I don't think he's a lost cause, yet.

~jrm
From: Jon Harrop
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <4309fe57$0$97131$ed2619ec@ptn-nntp-reader03.plus.net>
Joe Marshall wrote:
> However, I have read your critique on Mathematica and have discussed
> it with friends that are in a position to know about it.  I don't
> assume that Jon has the language design and semantics experience to
> see the obvious flaws.

I have written several compilers and interpreters in OCaml (and some in C++,
a long time ago), including Mathematica JIT compilers and interpreters. So
I am well aware of Mathematica's design and semantics, as well as several
other languages. However, I have no formal training in this.

> I expect he has unconsciously developed the 
> panglossian view that Mathematica is the best of all possible computer
> algebra systems and that whatever flaws it has are justifiable because
> all software has bugs and idiosyncrasies.

Not at all. Mathematica is the only such program that I have used, and I
have used it quite a bit. I know of many possible improvements to
Mathematica and am keen to see it improved. However, I prefer to spend my
time improving the world's software, rather than whining about the most
obscure flaws that I can find in Mathematica.

>> Jon Harrop <······@jdh30.plus.com> writes:
>> Yes, like most other languages, Mathematica has undefined behaviour under
>> certain circumstances. The same applies to the order of evaluation of
>> function arguments in OCaml, for example.
> 
> These are *completely* different kinds of problems.  In OCaml
> (presumably), the order of evaluation is not specified, but the
> behavior is not undefined!

You cannot predict what such an OCaml program will do.

> Does one have control over what virtual pages a variable occupies in
> Mathematica?  Can one control grouping so that two variables are
> required or prohibited from sharing?  Is it easy and straightforward
> to avoid unexpected dependencies?

From experience, none of these have ever caused me a problem.

>>> There are so many bad things about Mathematica's
>>> syntax and semantics that most people trying to use
>>> it will only experience a small subset of the possible
>>> flaws.
>>
>> That is true for many complicated languages.
> 
> Unfortunately so, but it need not be the case.

Yes, absolutely.

> There are certain 
> principles of language design that were developed *years* ago (some of
> the principles are *thousands* of years old) that have withstood the
> test of time.  The kinds of flaws that come from ignorance or
> deliberate rejection of these principles are qualitatively different
> from the ones that come from the unavoidable (but correctable) bugs in
> the implementation.  Mathematica's problems are of the first kind.

Yes but Mathematica is also a commercial product that had to be developed in
a reasonable time by a relatively small group of people who, consequently,
had to have an enormous combined knowledge. I think they did a remarkable
job. I'm sure we can improve upon each of these aspects of Mathematica but
the point is that we could not improve upon all of them simultaneously.

I am not saying that it isn't constructive to study the flaws in existing
software but if you are going to do so, you should do it properly.

>>> Mathematica programmers by and large do not use functions.
>>> They think they are defining functions but they are defining
>>> rules by   f[x_]:= ....;
>>
>> They are defining a mapping. That is a function.
> 
> There are BIG differences between mathematical functions and rewrite
> rules.  Even a rewrite rule that defines a function may not be the
> function you think it is or expect.  Lisp hackers, computer language
> theorists, and especially computer language and computer algebra
> theorists know that function definition is one of the trickiest parts
> of semantics.

Can you be more specific?

>>> As for whether it makes an AST at all -- well, along the
>>> way the parser does that, but Mathematica doesn't stop
>>> there. As mentioned previously, you need to intersperse
>>> Hold, HoldAll, Release, etc. in key places.
>>
>> Just as you use quote in a Lisp macro.
> 
> Here is a prime example.
> 
> Mathematica `evaluates' an expression by iteratively applying rewrite
> rules until it reaches a fixed point (until `nothing changes').
> This sounds like it may be a reasonable, if unorthodox, choice for
> evaluation semantics, and newbies often think that Lisp or Scheme
> works this way.
> 
> Lisp's `eval' routine takes an expression as list structure and
> determines what value that expression means.  It *may* be the case
> that the value is itself a list structure that could be interpreted as
> an expression.  This is of no consequence to eval.  The result of
> evaluation is not itself iteratively evaluated.
> 
> Both Lisp and Mathematica have a mechanism for quoting, that is, for
> embedding structure that superficially appears to be an expression.
> In Lisp, this is QUOTE, and its effect is simple:  the value of a
> QUOTE expression is the object quoted.

Yes.

> In Mathematica, however, this would not work.  The value of a HOLD
> expression cannot be the object that is held because *that* object
> might be an expression (and thus get fed back into the Mathematica
> evaluator).  Instead, the value of a HOLD expression is the HOLD
> expression itself.

I don't understand. What do you mean by "object" in the context of
Mathematica?

> There is a subtle difference:  Lisp can embed *any* object in code
> through use of QUOTE.  Mathematica cannot embed objects in code, but
> it can almost fake it because HOLD expressions end up being constant.
> You may think this is a difference that makes no difference, but it
> shows up when you write higher-order macros.  A higher-order macro has
> two stages of parameterization:  the first occurs when you use the
> higher-order macro to define another macro, the second occurs when you
> use the macro that is defined.  At which stage do you expand the
> parameters that are introduced at stage 1?  For some macros, those
> parameters ought to be expanded at stage 1, but for others it might be
> stage 2 (it could be both!).  In Lisp, it is relatively
> straightforward to control the stage at which a macro parameter is
> expanded through use of the appropriate level of nesting of QUOTED
> structure.  At each stage of expansion, QUOTED list structure becomes
> a literal expression for the next stage.  In Mathematica, however, a
> HOLD expression remains unchanged until you RELEASE it, so you can
> *defer* evaluation, but you can't easily *undefer* it.

Again, I don't understand. Are you saying that, if you were to try to write
a macro in Mathematica, you would need to apply Release manually? If so, I
agree.

> (In Lisp, QUOTE and EVAL respectively take you one step up and down
> the syntactic reflect/reify tower.  At each meta-level, additional
> quotes take you further up, and additional EVALs take you further
> down.  In Mathematica, HOLD takes you a step up, but RELEASE takes you
> *all the way down* to the bottom level.)

No. Release only removes 1 level of Hold:

Release[Hold[Hold[x]]]
Hold[x]

> This is an example of being ignorant about language design principles.
> If you have reflection in a language, you should make sure that each
> reflective operator has a corresponding reification operator that
> *exactly* undoes that reflection.  If you don't, it will be either
> painful or impossible to *use* the reflection in non-trivial ways.
> Mathematica chose to ignore this principle, and guess what happens.

Are you sure you haven't misunderstood Release?

>>> I think people are being inexplicably kind to spend
>>> so much time to educate Jon H.    Comp.lang.lisp has a long
>>> tradition of having a short fuse. :)  Maybe JH should get
>>> a troll award.
> 
> I guess I don't think he's a lost cause, yet.

How kind.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Joe Marshall
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <acj93hx8.fsf@ccs.neu.edu>
Jon Harrop <······@jdh30.plus.com> writes:

>>> Jon Harrop <······@jdh30.plus.com> writes:
>>> Yes, like most other languages, Mathematica has undefined behaviour under
>>> certain circumstances. The same applies to the order of evaluation of
>>> function arguments in OCaml, for example.
>> 
>
> Joe Marshall wrote:
>> These are *completely* different kinds of problems.  In OCaml
>> (presumably), the order of evaluation is not specified, but the
>> behavior is not undefined!
>
> You cannot predict what such an OCaml program will do.

I can predict that OCaml will correctly evaluate programs that have no
side effects.  This is a pretty strong prediction.  When this
prediction is not adequate (that is, when I wish to use side
effects), the language provides sequencing constructs for this
purpose.  Not using them is considered an abuse of the language.

>> Does one have control over what virtual pages a variable occupies in
>> Mathematica?  Can one control grouping so that two variables are
>> required or prohibited from sharing?  Is it easy and straightforward
>> to avoid unexpected dependencies?
>
> From experience, none of these have ever caused me a problem.

You've never used the Update function?

> I am not saying that it isn't constructive to study the flaws in existing
> software but if you are going to do so, you should do it properly.

I think Richard Fateman is qualified.

>>>> Mathematica programmers by and large do not use functions.
>>>> They think they are defining functions but they are defining
>>>> rules by   f[x_]:= ....;
>>>
>>> They are defining a mapping. That is a function.
>> 
>> There are BIG differences between mathematical functions and rewrite
>> rules.  Even a rewrite rule that defines a function may not be the
>> function you think it is or expect.  Lisp hackers, computer language
>> theorists, and especially computer language and computer algebra
>> theorists know that function definition is one of the trickiest parts
>> of semantics.
>
> Can you be more specific?

Sure.  In Lisp, you can write higher-order functions --- functions
that take functions as arguments and return functions as values.
Let's consider the set of functions.  Some of these map from the set
of functions to the set of functions.  But given a discrete domain and
range, the number of functions that can be defined over that domain
and range is       
                     domain
                range 

So the set of functions has a cardinality equal to that cardinality
raised to itself.  Only the empty set has this property.

Once you get past that difficulty, you'll find that what you are
defining is likely a series of approximations to a computable partial
function.  

But then again, you might not be defining a function at all.
The fibonacci function can be mathematically defined with this
recurrence relationship:

      F(n) = F(n+1) - F(n-1)

but that definition won't work in a computer.

>> In Mathematica, however, this would not work.  The value of a HOLD
>> expression cannot be the object that is held because *that* object
>> might be an expression (and thus get fed back into the Mathematica
>> evaluator).  Instead, the value of a HOLD expression is the HOLD
>> expression itself.
>
> I don't understand. What do you mean by "object" in the context of
> Mathematica?

I may be using a bad terminology.  In Lisp there are data structures
that can be used to represent Lisp expressions.  The data structures
*aren't* expressions --- they are just data structures:

(defvar *my-ds* nil)

(push '4 *my-ds*)
(push '3 *my-ds*)
(push '+ *my-ds*)
*my-ds*
  => (+ 3 4)

Note that this does *not* evaluate to 7.  It's a list that happens to
look like an expression.  If I continue to push elements:
(push '2 *my-ds*)
(push '* *my-ds*)
(push '5 *my-ds*)
*my-ds*
  => (5 * 2 + 3 4)

I now have a list that happens to look a lot like an infix
expression.  It *isn't* an expression, but it looks like one.

Presumably, Mathematica has some sort of data structure that can be
used in a similar way.

>> There is a subtle difference:  Lisp can embed *any* object in code
>> through use of QUOTE.  Mathematica cannot embed objects in code, but
>> it can almost fake it because HOLD expressions end up being constant.
>> You may think this is a difference that makes no difference, but it
>> shows up when you write higher-order macros.  A higher-order macro has
>> two stages of parameterization:  the first occurs when you use the
>> higher-order macro to define another macro, the second occurs when you
>> use the macro that is defined.  At which stage do you expand the
>> parameters that are introduced at stage 1?  For some macros, those
>> parameters ought to be expanded at stage 1, but for others it might be
>> stage 2 (it could be both!).  In Lisp, it is relatively
>> straightforward to control the stage at which a macro parameter is
>> expanded through use of the appropriate level of nesting of QUOTED
>> structure.  At each stage of expansion, QUOTED list structure becomes
>> a literal expression for the next stage.  In Mathematica, however, a
>> HOLD expression remains unchanged until you RELEASE it, so you can
>> *defer* evaluation, but you can't easily *undefer* it.
>
> Again, I don't understand. Are you saying that, if you were to try to write
> a macro in Mathematica, you would need to apply Release manually? If so, I
> agree.

Yes, you have to apply Release manually and that `turns on'
evaluation, but how do you `turn it off' again? 

>> (In Lisp, QUOTE and EVAL respectively take you one step up and down
>> the syntactic reflect/reify tower.  At each meta-level, additional
>> quotes take you further up, and additional EVALs take you further
>> down.  In Mathematica, HOLD takes you a step up, but RELEASE takes you
>> *all the way down* to the bottom level.)
>
> No. Release only removes 1 level of Hold:
>
> Release[Hold[Hold[x]]]
> Hold[x]

That's not what I meant.  I meant that once Released, the substitution
rules are iteratively applied until the form is fully reduced.  You
cannot do a `one-step' Release.

>> This is an example of being ignorant about language design principles.
>> If you have reflection in a language, you should make sure that each
>> reflective operator has a corresponding reification operator that
>> *exactly* undoes that reflection.  If you don't, it will be either
>> painful or impossible to *use* the reflection in non-trivial ways.
>> Mathematica chose to ignore this principle, and guess what happens.
>
> Are you sure you haven't misunderstood Release?

No, I'm not sure, but that's what I infer from Fateman's paper and
what you have been saying.

~jrm
From: Jon Harrop
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <430a54b5$0$97131$ed2619ec@ptn-nntp-reader03.plus.net>
Joe Marshall wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> You cannot predict what such an OCaml program will do.
> 
> I can predict that OCaml will correctly evaluate programs that have no
> side effects.

IIRC, there was some interesting work at Oxford recently that demonstrated
how the side-effect-free (and without laziness) subset of ML is strictly
less powerful.

> This is a pretty strong prediction.  When this 
> predicition is not adequate (that is, when I wish to use side
> effects), the language provides sequencing constructs for this
> purpose.  Not using them is considered an abuse of the language.

Yes.

>> From experience, none of these have ever caused me a problem.
> 
> You've never used the Update function?

Nope. Indeed, I had never even heard of the Update function.

>>> There are BIG differences between mathematical functions and rewrite
>>> rules.  Even a rewrite rule that defines a function may not be the
>>> function you think it is or expect.  Lisp hackers, computer language
>>> theorists, and especially computer language and computer algebra
>>> theorists know that function definition is one of the trickiest parts
>>> of semantics.
>>
>> Can you be more specific?
> 
> Sure.  In Lisp, you can write higher-order functions --- functions
> that take functions as arguments and return functions as values.
> Let's consider the set of functions.  Some of these map from the set
> of functions to the set of functions.  But given a discrete domain and
> range, the number of functions that can be defined over that domain
> and range is
>                      domain
>                 range
> 
> So the set of functions has a cardinality equal to that cardinality
> raised to itself.  Only the empty set has this property.
> 
> Once you get past that difficulty, you'll find that what you are
> defining is likely a series of approximations to a computable partial
> function.

I wouldn't apply any of that theory to Mathematica directly. In Mathematica,
you only have replacement rules that locate ASTs matching given patterns
and substitute them with ASTs. So there are no functions that map integers
to functions in Mathematica, for example.

> But then again, you might not be defining a function at all.
> The fibonacci function can be mathematically defined with this
> recurrence relationship:
> 
>       F(n) = F(n+1) - F(n-1)
> 
> but that definition won't work in a computer.

No. Firstly, your definition is both wrong and incomplete (it needs base
cases before it will work, even in maths), it should be:

  F(n) = F(n-1) + F(n-2)
  F(1) = F(2) = 1

Secondly, you can type this into Mathematica and compute Fibonacci numbers
"in a computer":

In[2]:= F[n_] := F[n-1] + F[n-2]

In[3]:= F[1] = F[2] = 1

Out[3]= 1

In[4]:= F[10]

Out[4]= 55

You can get a better idea of how Mathematica's fixed point based evaluation
is working by looking at the equivalent OCaml function, written using
pattern matching:

# let rec f n = match n with
    | 1 | 2 -> 1
    | n -> f(n-1) + f(n-2);;
val f : int -> int = <fun>
# f 10;;
- : int = 55

You have to do little more than decorate with polymorphic variant type
constructors and add substitution until fixed point to get a
mini-Mathematica interpreter written in OCaml:

# let rec eval t =
  let t' = match t with
    | `Seq (`Plus, [i; j]) -> begin match eval i, eval j with
      | `Int i, `Int j -> `Int (i + j)
      | i, j -> `Seq (`Plus, [i; j])
      end
    | `Seq (`F, [`Int (1 | 2)]) -> `Int 1
    | `Seq (`F, [n]) ->
        let n = eval n in
        let e1 = eval (`Seq (`Plus, [n; `Int (-1)])) in
        let e2 = eval (`Seq (`Plus, [n; `Int (-2)])) in
        eval (`Seq (`Plus, [`Seq (`F, [e1]); `Seq (`F, [e2])]))
    | ast -> ast in
  if t = t' then t else eval t';;
val eval : ([> `Int of int | `Seq of [> `F |`Plus ] * 'a list ] as 'a)
  -> 'a = <fun>

For example, this can evaluate "1+2":

# eval (`Seq (`Plus, [`Int 1; `Int 2]));;
- : [> `Int of int | `Seq of [> `F | `Plus ] * 'a list ] as 'a = `Int 3

As the rules for the Fibonacci recurrence relation are built in, it can also
compute Fibonacci numbers:

# eval (`Seq (`F, [`Int 10]));;
- : [> `Int of int | `Seq of [> `F | `Plus ] * 'a list ] as 'a = `Int 55

You can probably write that interpreter more succinctly in Lisp thanks to
quote. However, it is nice that the OCaml compiler infers the type of the
(polymorphic variant) AST and statically type checks its use.

>>> In Mathematica, however, this would not work.  The value of a HOLD
>>> expression cannot be the object that is held because *that* object
>>> might be an expression (and thus get fed back into the Mathematica
>>> evaluator).  Instead, the value of a HOLD expression is the HOLD
>>> expression itself.
>>
>> I don't understand. What do you mean by "object" in the context of
>> Mathematica?
> 
> I may be using a bad terminology.  In Lisp there are data structures
> that can be used to represent Lisp expressions.  The data structures
> *aren't* expressions --- they are just data structures:
> 
> (defvar *my-ds* nil)
> 
> (push '4 *my-ds*)
> (push '3 *my-ds*)
> (push '+ *my-ds*)
> *my-ds*
>   => (+ 3 4)
> 
> Note that this does *not* evaluate to 7.  It's a list that happens to
> look like an expression.  If I continue to push elements:
> (push '2 *my-ds*)
> (push '* *my-ds*)
> (push '5 *my-ds*)
> *my-ds*
>   => (5 * 2 + 3 4)
> 
> I now have a list that happens to look a lot like an infix
> expression.  It *isn't* an expression, but it looks like one.

Yes. It is an abstract syntax tree (AST). In Mathematica, you only have
ASTs.

> Presumably, Mathematica has some sort of data structure that can be
> used in a similar way.

Yes, in Mathematica you use an AST that looks like a list. It is written:

  {1, 2, 3}

but it is actually stored as the AST (seen in "FullForm"):

  List[1, 2, 3]

There are functions like Part that allow you to extract "elements" (ASTs)
from these "lists" (ASTs). These functions can be implemented as
replacement rules (also ASTs). For example, a replacement rule to extract
the first element (the head) of a list may be written:

  {h_, ___} -> h

But this is just syntactic sugar for another AST:

  Rule[List[Pattern[h, Blank[]], BlankNullSequence[]], h]

Mathematica's evaluation strategy (replace repeatedly, to fixed point) is
equivalent to the recursion of a term-level Lisp interpreter (or any other
language). If Mathematica is fed rules that never reach fixed point then it
hangs. If a term-level Lisp interpreter is fed a program that recurses
indefinitely then it also hangs.

> Yes, you have to apply Release manually and that `turns on'
> evaluation, but how do you `turn it off' again?

You can apply Hold again. As a shortcut, if you just want to evaluate what
is inside a Hold[...] AST, you can use Hold[Evaluate[...]], which evaluates
the inner expression. For example:

In[5]:= Hold[Evaluate[1 + 2 + Hold[3 + 4]]]

Out[5]= Hold[3 + Hold[3 + 4]]

>>> (In Lisp, QUOTE and EVAL respectively take you one step up and down
>>> the syntactic reflect/reify tower.  At each meta-level, additional
>>> quotes take you further up, and additional EVALs take you further
>>> down.  In Mathematica, HOLD takes you a step up, but RELEASE takes you
>>> *all the way down* to the bottom level.)
>>
>> No. Release only removes 1 level of Hold:
>>
>> Release[Hold[Hold[x]]]
>> Hold[x]
> 
> That's not what I meant.  I meant that once Released, the substitution
> rules are iteratively applied until the form is fully reduced.  You
> cannot do a `one-step' Release.

That is like saying that you do not want the result of a recursive function
call but, rather, you want to inline the body of the function having
substituted the arguments.

In Mathematica, you cannot apply all of the built-in rules once only (AFAIK)
but you can apply specified rules once only. For example, this gives the
body of the "recursive call" F[10] with its argument substituted:

In[8]:= F[10] /. {F[n_] -> F[n - 1] + F[n - 2], F[1] -> 1, F[2] -> 1}

Out[8]= F[8] + F[9]

>>> This is an example of being ignorant about language design principles.
>>> If you have reflection in a language, you should make sure that each
>>> reflective operator has a corresponding reification operator that
>>> *exactly* undoes that reflection.  If you don't, it will be either
>>> painful or impossible to *use* the reflection in non-trivial ways.
>>> Mathematica chose to ignore this principle, and guess what happens.
>>
>> Are you sure you haven't misunderstood Release?
> 
> No, I'm not sure, but that's what I infer from Fateman's paper and
> what you have been saying.

Ok. I think you're on the right track and hopefully what I just said will
clarify things. Mathematica's evaluation strategy is not like those of
other languages but it does work very well, especially for symbolic maths.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Greg Buchholz
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <1124756816.807878.50510@z14g2000cwz.googlegroups.com>
Jon Harrop wrote:
> Joe Marshall wrote:
> > The fibonacci function can be mathematically defined with this
> > recurrance relationship:
> >
> >       F(n) = F(n+1) - F(n-1)
> >
> > but that definition won't work in a computer.
>
> No. Firstly, your definition is both wrong and incomplete (it needs base
> cases before it will work, even in maths), it should be:
>
>   F(n) = F(n-1) + F(n-2)
>   F(1) = F(2) = 1
>

Hmm...
   F(n)   = F(n-1)   + F(n-2)

Let  n = m+1, then substitute...
   F(m+1) = F(m+1-1) + F(m+1-2)

simplify...
   F(m+1) = F(m)     + F(m-1)

solve for F(m)...
   F(m)   = F(m+1) - F(m-1)
From: Jon Harrop
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <430b0847$0$97109$ed2619ec@ptn-nntp-reader03.plus.net>
Greg Buchholz wrote:
> Hmm...
>    F(n)   = F(n-1)   + F(n-2)
> 
> Let  n = m+1, then substitute...
>    F(m+1) = F(m+1-1) + F(m+1-2)
> 
> simplify...
>    F(m+1) = F(m)     + F(m-1)
> 
> solve for F(m)...
>    F(m)   = F(m+1) - F(m-1)

D'oh. Very true. Ok, so it was right and incomplete.

I agree with Joe that rewrite rules are not like mathematical functions but
the original topic of discussion was the relationship between rewrite rules
and functions in other programming languages (like Lisp). If you use that
recurrence relation directly then Lisp, OCaml and Mathematica programs will
not terminate, equivalently. So unless I've missed something else, I can't
see how this example shows a difference between them.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Edi Weitz
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <uslx16v28.fsf@agharta.de>
On Mon, 22 Aug 2005 23:39:21 +0100, Jon Harrop <······@jdh30.plus.com> wrote:

> No. Firstly, your definition is both wrong and [...]

It isn't and you didn't get his point.  You're better off if you don't
assume you're smarter than Joe Marshall...

Cheers,
Edi.

-- 

Lisp is not dead, it just smells funny.

Real email: (replace (subseq ·········@agharta.de" 5) "edi")
From: Joe Marshall
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <wtmcim7t.fsf@ccs.neu.edu>
Jon Harrop <······@jdh30.plus.com> writes:

> I agree with Joe that rewrite rules are not like mathematical functions but
> the original topic of discussion was the relationship between rewrite rules
> and functions in other programming languages (like Lisp).

Actually, I was addressing this:

  Richard Fateman wrote:
  > Mathematica programmers by and large do not use functions.
  > They think they are defining functions but they are defining
  > rules by   f[x_]:= ....;

to which you replied:

  > They are defining a mapping. That is a function.


> If you use that recurrence relation directly then Lisp, OCaml and
> Mathematica programs will not terminate, equivalently. So unless
> I've missed something else, I can't see how this example shows a
> difference between them.

It isn't meant to.  It's meant to point out that although the
recurrence relation defines a mapping, it does *not* define a function.
That is, things that syntactically appear to be functions in
Mathematica and Lisp are not necessarily functions.

>> Sure.  In Lisp, you can write higher-order functions --- functions
>> that take functions as arguments and return functions as values.
>> Let's consider the set of functions.  Some of these map from the set
>> of functions to the set of functions.  But given a discrete domain and
>> range, the number of functions that can be defined over that domain
>> and range is
>>                      domain
>>                 range
>>
>> So the set of functions has a cardinality equal to that cardinality
>> raised to itself.  Only the empty set has this property.
>>
>> Once you get past that difficulty, you'll find that what you are
>> defining is likely a series of approximations to a computable partial
>> function.


Jon Harrop <······@jdh30.plus.com> writes:
>
> I wouldn't apply any of that theory to Mathematica directly. In Mathematica,
> you only have replacement rules that locate ASTs matching given patterns
> and substitute them with ASTs.

As it turns out, this is a computation model that is as powerful as
lambda calculus or a Turing machine.  (See Post production systems.)
Whether you apply the theory or not, you still have to justify whether
the replacement rule defines a function under repeated application.

> So there are no functions that map integers to functions in
> Mathematica, for example.

So write one.

>> But then again, you might not be defining a function at all.
>> The fibonacci function can be mathematically defined with this
>> recurrence relationship:
>>
>>       F(n) = F(n+1) - F(n-1)
>>
>> but that definition won't work in a computer.
>
> No. Firstly, your definition is both wrong and incomplete (it needs base
> cases before it will work, even in maths),

No prob.  F(10) = 89, F(5) = 8

Is the fibonacci function not a solution to the recurrance relationship?

> it should be:
>   F(n) = F(n-1) + F(n-2)
>   F(1) = F(2) = 1

Yes, that's more traditional, but why did you change it?  What's wrong
with the definition above?  Why do you think *you* defined a function?

(Incidentally, I found a similar definition in a math book that had
the annoying habit of specifying recursive formulae by equations such
as  f(n): g (f(n-1), f(n+1)).  It's idiocy like this that makes me
hate math books.)

> Secondly, you can type this into Mathematica and compute Fibonacci numbers
> "in a computer":
>[elided]


>> Presumably, Mathematica has some sort of data structure that can be
>> used in a similar way.
>
> Yes, in Mathematica you use an AST that looks like a list. It is written:
>
>   {1, 2, 3}
>
> but it is actually stored as the AST (seen in "FullForm"):
>
>   List[1, 2, 3]
>
> There are functions like Part that allow you to extract "elements" (ASTs)
> from these "lists" (ASTs). These functions can be implemented as
> replacement rules (also ASTs). For example, a replacement rule to extract
> the first element (the head) of a list may be written:
>
>   {h_, ___} -> h
>
> But this is just syntactic sugar for another AST:
>
>   Rule[List[Pattern[h, Blank[]], BlankNullSequence[]], h]
>
> Mathematica's evaluation strategy (replace repeatedly, to fixed point) is
> equivalent to the recursion of a term-level Lisp interpreter (or any other
> language). If Mathematica is fed rules that never reach fixed point then it
> hangs. If a term-level Lisp interpreter is fed a program that recurses
> indefinitely then it also hangs.

So what's the head of List[3+4, 5 6]?

>> Yes, you have to apply Release manually and that `turns on'
>> evaluation, but how do you `turn it off' again?
>
> You can apply Hold again. As a shortcut, if just you want to evaluate what
> is inside a Hold[...] AST, is to use Hold[Evaluate[...]] which evaluates
> the inner expression. For example:
>
> In[5]:= Hold[Evaluate[1 + 2 + Hold[3 + 4]]]
>
> Out[5]= Hold[3 + Hold[3 + 4]]
>
>>>> (In Lisp, QUOTE and EVAL respectively take you one step up and down
>>>> the syntactic reflect/reify tower.  At each meta-level, additional
>>>> quotes take you further up, and additional EVALs take you further
>>>> down.  In Mathematica, HOLD takes you a step up, but RELEASE takes you
>>>> *all the way down* to the bottom level.)
>>>
>>> No. Release only removes 1 level of Hold:
>>>
>>> Release[Hold[Hold[x]]]
>>> Hold[x]
>>
>> That's not what I meant.  I meant that once Released, the substitution
>> rules are iteratively applied until the form is fully reduced.  You
>> cannot do a `one-step' Release.
>
> That is like saying that you do not want the result of a recursive function
> call but, rather, you want to inline the body of the function having
> substituted the arguments.

Exactly.  This is what you want when you do higher-order macros.

Suppose we wanted to do this odd thing.  We want a number of lisp
variables to each be bound to a list that is an example of how the
variable might be defined.  That is,

*foo*
 => (defvar *foo* 'hello)

*a-constant*
 => (defconstant *a-constant* 33)

Easy enough:

(defvar *foo* (list 'defvar '*foo* ''hello))

(defconstant *a-constant* (list 'defconstant '*a-constant* ''33))

But we could make a pair of macros to do this for us:

(defmacro define-example-variable (name sample-value)
  `(defvar ,name `(defvar ,',name ,,sample-value)))

(defmacro define-example-constant (name sample-value)
  `(defconstant ,name `(defconstant ,',name ,,sample-value)))

(define-example-variable *bar* 11)

 *bar*
   => (defvar *bar* 11)

But those macros are awfully similar, let's generate them
automatically.

(defmacro define-example-definer (macro-name definer)
  `(defmacro ,macro-name (name value)
     `(,',definer ,name `(,',',definer ,',name ,,value))))

Now we can write this:

(define-example-definer define-example-variable defvar)
(define-example-definer define-example-constant defconstant)

(define-example-variable *baz* (+ 2 3))

*baz*
  => (defvar *baz* 5)


Now in the definition of define-example-definer, we can see an example
of why we want to control single steps of expansion.  The ,',name
expression is substituted when the defined-macro is called, but the
quoting keeps it from being further substituted.  The ,,value is also
substituted when the macro is called, but because it does not have the
intermediate quote, it is evaluated when the generated form is evaluated.

This is where HOLD and RELEASE differ from EVAL and QUOTE.  HOLD and
RELEASE allow you to write the equivalent of ,,value , but not the
equivalent of ,',name.

> Ok. I think you're on the right track and hopefully what I just said will
> clarify things. Mathematica's evaluation strategy is not like those of
> other languages but it does work very well, especially for symbolic maths.

Fateman argues (convincingly) that this is actually a poor choice for
symbolic maths:

  ``This technique is too expensive to apply correctly, and somewhat
    haphazard when implemented heuristically. It appears to be one of
    the more negative and unforeseen consequences of relying heavily
    on rules as though they were procedure calls.''

    http://http.cs.berkeley.edu/~fateman/papers/mma.review.pdf

Given that other computer algebras seem to work without resorting to
infinite rule-rewrite, it's hard to see what this buys you.

~jrm
From: Jon Harrop
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <430b932f$0$22923$ed2619ec@ptn-nntp-reader01.plus.net>
Joe Marshall wrote:
> It isn't meant to.  It's meant to point out that although the
> recurrence relation defines a mapping it does *not* define a function

Would you say that the Fibonacci recurrence relation is a recursive mapping
and that the solution to the recurrence relation (with two base cases) is
the Fibonacci function, in the mathematical sense? So the recurrence
relation is not a function?

The Fibonacci function (in the programming language sense) as a pattern
match is a function, just not in the mathematical sense? Indeed, even the
non-terminating program is a function in the non-mathematical sense, albeit
a useless one?

> That is, things that syntactically appear to be functions in
> Mathematica and Lisp are not necessarily functions.

In the mathematical sense, yes. Most programmers would say that a Lisp
function is a function. That makes "function" a homonym. So we must be
careful to distinguish between the different meanings of this word?

> Jon Harrop <······@jdh30.plus.com> writes:
>> I wouldn't apply any of that theory to Mathematica directly. In
>> Mathematica, you only have replacement rules that locate ASTs matching
>> given patterns and substitute them with ASTs.
> 
> As it turns out, this is a computation model that is as powerful as
> lambda calculus or a Turing machine.  (See Post production systems.)

Yes.

> Whether you apply the theory or not, you still have to justify whether
> the replacement rule defines a function under repeated application.

If you are claiming to represent a mathematical function as a recursive
(programming) function or a replacement rule that is repeated to fixed
point, yes.

>> So there are no functions that map integers to functions in
>> Mathematica, for example.
> 
> So write one.

Let me rephrase: "It is not possible to write functions that map integers to
functions in Mathematica". As Richard would say, you can only write
something that appears to do that from a user's point of view. For example,
the following looks like a curried power function:

f[n_Integer] := Function[{x}, x^n]

but it is actually a replacement rule that finds ASTs that contain a single
integer and replaces them with an AST that replaces "x" with "x^n".

>> No. Firstly, your definition is both wrong and incomplete (it needs base
>> cases before it will work, even in maths),
> 
> No prob.  F(10) = 89, F(5) = 8
> 
> Is the fibonacci function not a solution to the recurrence relationship?

For "function" in the mathematical sense, yes.

>> it should be:
>>   F(n) = F(n-1) + F(n-2)
>>   F(1) = F(2) = 1
> 
> Yes, that's more traditional, but why did you change it?

To compute it without using RSolve.

> What's wrong with the definition above?

As you said before, it won't work "on a computer" directly. However, I can't
formalise that last sentence.

> Why do you think *you* defined a function? 

Because my version could terminate when written as a (programming) function.

>> Mathematica's evaluation strategy (replace repeatedly, to fixed point) is
>> equivalent to the recursion of a term-level Lisp interpreter (or any
>> other language). If Mathematica is fed rules that never reach fixed point
>> then it hangs. If a term-level Lisp interpreter is fed a program that
>> recurses indefinitely then it also hangs.
> 
> So what's the head of List[3+4, 5 6]?

The symbol "List".

>> That is like saying that you do not want the result of a recursive
>> function call but, rather, you want to inline the body of the function
>> having substituted the arguments.
> 
> Exactly.  This is what you want when you do higher-order macros.

Presumably higher-order macros only let you make new definitions that do
that, i.e. you can't do that to a previous definition?

> Suppose we wanted to do this odd thing.  We want a number of lisp
> variables to each be bound to a list that is an example of how the
> variable might be defined.  That is,
> 
> *foo*
>  => (defvar *foo* 'hello)

Let me try to translate these into Mathematica as they come. I'll pretend
that SetDelayed (:=) is defvar and Set (=) is defconstant.

In[1]:= foo := hello

> *a-constant*
>  => (defconstant *a-constant* 33)

In[2]:= aconstant = 33

Out[2]= 33

> Easy enough:
> 
> (defvar *foo* (list 'defvar '*foo* ''hello))

In[3]:= foo := Hold[foo := hello]

> (defconstant *a-constant* (list 'defconstant '*a-constant* ''33))

In[4]:= aconstant = Hold[aconstant := 33]

Out[4]= Hold[aconstant := 33]

> But we could make a pair of macros to do this for us:
> 
> (defmacro define-example-variable (name sample-value)
>   `(defvar ,name `(defvar ,',name ,,sample-value)))

In[5]:= DefineExampleVariable[name_, value_] :=
          (name := Hold[name := value])

> (defmacro define-example-constant (name sample-value)
>   `(defconstant ,name `(defconstant ,',name ,,sample-value)))

In[6]:= DefineExampleConstant[name_, value_] :=
          (name := (name := value))

> (define-example-variable *bar* 11)

In[7]:= DefineExampleVariable[bar, 11]

>  *bar*
>    => (defvar *bar* 11)

In[8]:= bar

Out[8]= Hold[bar := 11]

> But those macros are awfully similar, let's generate them
> automatically.
> 
> (defmacro define-example-definer (macro-name definer)
>   `(defmacro ,macro-name (name value)
>      `(,',definer ,name `(,',',definer ,',name ,,value))))

In[9]:= DefineExampleDefiner[Macro_, Definer_] :=
          Definer[Macro[name_, value_], (name := Hold[name := value])]

> Now we can write this:
> 
> (define-example-definer define-example-variable defvar)
> (define-example-definer define-example-constant defconstant)

In[10]:= DefineExampleDefiner[DefineExampleVariable, SetDelayed]

In[11]:= DefineExampleDefiner[DefineExampleConstant, Set]

> (define-example-variable *baz* (+ 2 3))

In[12]:= DefineExampleVariable[baz, 2+3]

> *baz*
>   => (defvar *baz* 5)

In[13]:= baz

Out[13]= Hold[baz := 5]

> Now in the definition of define-example-definer, we can see an example
> of why we want to control single steps of expansion.

Yes.

> The ,',name 
> expression is substituted when the defined-macro is called, but the
> quoting keeps it from being further substituted.

Yes. I think that is the same as "a:= (Evaluate[b] := c)". For example, the
following "macro" "a" takes an argument "i" and defines a "macro" called
with i=1 => "x", i=2 => "y" and i=3 => "z" to be the unevaluated expression
"3+5+7":

In[1]:= a[i_] := (Evaluate[{x, y, z}[[i]]] := 3+5+7)

In[2]:= a[2]

In[3]:= y

Out[3]= 15

In[4]:= FullDefinition[y]

Out[4]= y := 3 + 5 + 7

I should probably have wrapped "name" in Evaluate in the earlier examples as
well. A better way of doing this in Mathematica may be to use
Module[{name2=name}, ...] to hoist the evaluation of name.

> The ,,value is also 
> substituted when the macro is called, but because it does not have the
> intermediate quote, it is evaluated when the generated form is evaluated.
> 
> This is where HOLD and RELEASE differ from EVAL and QUOTE.  HOLD and
> RELEASE allow you to write the equivalent of ,,value , but not the
> equivalent of ,',name.

If the above isn't the equivalent of ,',name (and I'm not sure if it is) then I'm
very confused. :-)

> Given that other computer algebras seem to work without resorting to
> infinite rule-rewrite, it's hard to see what this buys you.

Mathematica is the only CAS that I know so I can't really comment, suffice
to say that even if it doesn't buy you anything it still works very well,
IMHO.

However, I maintain that the rewrite to fixed point is a separate issue from
EVAL and QUOTE vs Release and Hold.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Joe Marshall
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <k6ibcnmy.fsf@ccs.neu.edu>
Jon Harrop <······@jdh30.plus.com> writes:

> Joe Marshall wrote:
>> It isn't meant to.  It's meant to point out that although the
>> recurrence relation defines a mapping it does *not* define a function
>
> Would you say that the Fibonacci recurrence relation is a recursive mapping
> and that the solution to the recurrence relation (with two bases cases) is
> the Fibonacci function, in the mathematical sense? So the recurrence
> relation is not a function?

I think I'd say that since a function is an abstract mathematical
object, that the recurrence relation is a specification for a
function.  I'd say that the Fibonacci function `satisfies' the
recurrence relation, but I'd hesitate to say that the Fibonacci
function is a `solution' to the recurrence relation.  (The recurrence
relation isn't a `problem', it is a `property'.)

> The Fibonacci function (in the programming language sense) as a pattern
> match is a function, just not in the mathematical sense? Indeed, even the
> non-terminating program is a function in the non-mathematical sense, albeit
> a useless one?

Lisp (and presumably Mathematica) compute the least fixed-point of the
non-recursive kernel function 

 (lambda (f) (lambda (x) (if (< x 2) x (+ (f (- x 2)) (f (- x 1))))))

The kernel function is indeed a function, but it isn't a function from
integers to integers, it's a function from f:int->int to f:int->int.

>> That is, things that syntactically appear to be functions in
>> Mathematica and Lisp are not necessarily functions.
>
> In the mathematical sense, yes. Most programmers would say that a Lisp
> function is a function. That makes "function" a homonym. So we must be
> careful to distinguish between the different meanings of this word?

Yes.  When Richard Fateman wrote:
  > Mathematica programmers by and large do not use functions.
  > They think they are defining functions but they are defining
  > rules by   f[x_]:= ....;

He *meant* (or my interpretation of what he meant) that something like
    f[x_] := 1 + Sin[x]

appears to most Mathematica programmers as a procedural (function)
definition: 
  Upon invocation, compute the Sin of x and add 1 to the result.

But that few Mathematica programmers realise that what they have
written is more along the lines of:

  If the pattern  f[ ... ] appears in an input, replace it with the
  template  1 + Sin[...]

And that this misconception is often a source of difficulty.

I could be wrong about what Prof. Fateman meant.

>>> No. Firstly, your definition is both wrong and incomplete (it needs base
>>> cases before it will work, even in maths),
>> 
>> No prob.  F(10) = 89, F(5) = 8
>> 
>> Is the fibonacci function not a solution to the recurrence relationship?
>
> For "function" in the mathematical sense, yes.
>
>>> it should be:
>>>   F(n) = F(n-1) + F(n-2)
>>>   F(1) = F(2) = 1
>> 
>> Yes, that's more traditional, but why did you change it?
>
> To compute it without using RSolve.
>
>> What's wrong with the definition above?
>
> As you said before, it won't work "on a computer" directly. However, I can't
> formalise that last sentence.

My `definition', 
   F(n) = F(n+1) - F(n-1)
   F(10) = 89, F(5) = 8

actually specifies the least fixed-point of the kernel function:

  (lambda (f)
    (lambda (x)
      (cond ((= x 10) 89)
            ((= x 5) 8)
            (t (- (f (+ x 1)) (f (- x 1)))))))

which is _|_, that is, it doesn't converge.

>>> That is like saying that you do not want the result of a recursive
>>> function call but, rather, you want to inline the body of the function
>>> having substituted the arguments.
>> 
>> Exactly.  This is what you want when you do higher-order macros.
>
> Presumably higher-order macros only let you make new definitions that do
> that, i.e. you can't do that to a previous definition?
>
>> Suppose we wanted to do this odd thing.  We want a number of lisp
>> variables to each be bound to a list that is an example of how the
>> variable might be defined.  That is,
>> 
>> *foo*
>>  => (defvar *foo* 'hello)
>
> Let me try to translate these into Mathematica as they come. I'll pretend
> that SetDelayed (:=) is defvar and Set (=) is defconstant.
>
> In[1]:= foo := hello
>
>> *a-constant*
>>  => (defconstant *a-constant* 33)
>
> In[2]:= aconstant = 33
>
> Out[2]= 33

Not quite.  I want this behavior:

  In[3]:= foo

  Out[3]= foo := hello

  In[4]:= aconstant

  Out[4]= aconstant = 33


If we can start there I think we can find the differences between EVAL
and RELEASE.

~jrm
From: Jon Harrop
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <430cf3d4$0$97094$ed2619ec@ptn-nntp-reader03.plus.net>
I agree with what you've said about functions.

Joe Marshall wrote:
>  (lambda (f) (lambda (x) (if (< x 2) x (+ (f (- x 2)) (f (- x 1))))))
> 
> The kernel function is indeed a function, but it isn't a function from
> integers to integers, it's a function from f:int->int to f:int->int.

Until you apply the Y combinator, at which point it becomes int -> int, of
course:

# let rec f x = if x<2 then x else f(x-2)+f(x-1);;
val f : int -> int = <fun>

Is there benefit in dealing with what you're calling the "kernel" function?

>     f[x_] := 1 + Sin[x]
> 
> appears to most Mathematica programmers as a procedural (function)
> definition:
>   Upon invocation, compute the Sin of x and add 1 to the result.
> 
> But that few Mathematica programmers realise that what they have
> written is more along the lines of:
> 
>   If the pattern  f[ ... ] appears in an input, replace it with the
>   template  1 + Sin[...]

Yes.

> And that this misconception is often a source of difficulty.

I never found it to be a source of difficulty. Also, when I started using
Mathematica I had little relevant knowledge.

>>> *foo*
>>>  => (defvar *foo* 'hello)
>>
>> Let me try to translate these into Mathematica as they come. I'll pretend
>> that SetDelayed (:=) is defvar and Set (=) is defconstant.
>>
>> In[1]:= foo := hello
>>
>>> *a-constant*
>>>  => (defconstant *a-constant* 33)
>>
>> In[2]:= aconstant = 33
>>
>> Out[2]= 33
> 
> Not quite.  I want this behavior:
> 
>   In[3]:= foo
> 
>   Out[3]= foo := hello
> 
>   In[4]:= aconstant
> 
>   Out[4]= aconstant = 33

What's wrong with the definitions I gave:

In[1]:= foo := Hold[foo := hello]

In[2]:= foo

Out[2]= Hold[foo := hello]

In[3]:= aconstant = Hold[aconstant := 33]

Out[3]= Hold[aconstant := 33]

> If we can start there I think we can find the differences between EVAL
> and RELEASE.

It's probably time we started a new thread as well...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Joe Marshall
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <u0heuk8h.fsf@ccs.neu.edu>
Jon Harrop <······@jdh30.plus.com> writes:

> I agree with what you've said about functions.
>
> Joe Marshall wrote:
>>  (lambda (f) (lambda (x) (if (< x 2) x (+ (f (- x 2)) (f (- x 1))))))
>> 
>> The kernel function is indeed a function, but it isn't a function from
>> integers to integers, it's a function from f:int->int to f:int->int.
>
> Until you apply the Y combinator, at which point it becomes int -> int, of
> course:
>
> # let rec f x = if x<2 then x else f(x-2)+f(x-1);;
> val f : int -> int = <fun>

In this case, yes, but Y can also give you  f:int->_|_

> Is there benefit in dealing with what you're calling the "kernel" function?

In an earlier post, I asked:
    > What's wrong with the definition above?

to which you replied:
    As you said before, it won't work "on a computer"
    directly. However, I can't formalise that last sentence.

I was attempting to provide the formalism.

>>     f[x_] := 1 + Sin[x]
>> 
>> appears to most Mathematica programmers as a procedural (function)
>> definition:
>>   Upon invocation, compute the Sin of x and add 1 to the result.
>> 
>> But that few Mathematica programmers realise that what they have
>> written is more along the lines of:
>> 
>>   If the pattern  f[ ... ] appears in an input, replace it with the
>>   template  1 + Sin[...]
>
> Yes.
>
>> And that this misconception is often a source of difficulty.
>
> I never found it to be a source of difficulty. Also, when I started using
> Mathematica I had little relevant knowledge.

Obviously your experience and the experience of Prof. Fateman differ.

I have to say that *I* was surprised that Mathematica operated by
`pattern match and rewrite until nothing changes'.  That particular
idea is used in a number of computer applications, but it seems to be
of most use in the domain of theorem proving and logic programming.  A
big problem in those domains is demonstrating that your rule set makes
sense.  Adding a correct rule to a correct rule set does not
necessarily yield a new correct rule set.  The experts in the field
acknowledge that it is very hard to reason about rule sets, so I would
think that it would be extraordinarily difficult for a casual user to
do so.  

It also seems to me to be far more likely that a casual Mathematica
user has had experience with procedural languages such as C, Basic,
Pascal, Fortran, or Java.  On the surface, the rule 

  f[x_] := 1 + Sin[x]

really looks like a statement in one of those languages (I certainly
would have thought it was).

>>>> *foo*
>>>>  => (defvar *foo* 'hello)
>>>
>>> Let me try to translate these into Mathematica as they come. I'll pretend
>>> that SetDelayed (:=) is defvar and Set (=) is defconstant.
>>>
>>> In[1]:= foo := hello
>>>
>>>> *a-constant*
>>>>  => (defconstant *a-constant* 33)
>>>
>>> In[2]:= aconstant = 33
>>>
>>> Out[2]= 33
>> 
>> Not quite.  I want this behavior:
>> 
>>   In[3]:= foo
>> 
>>   Out[3]= foo := hello
>> 
>>   In[4]:= aconstant
>> 
>>   Out[4]= aconstant = 33
>
> What's wrong with the definitions I gave:
>
> In[1]:= foo := Hold[foo := hello]
>
> In[2]:= foo
>
> Out[2]= Hold[foo := hello]
>
> In[3]:= aconstant = Hold[aconstant := 33]
>
> Out[3]= Hold[aconstant := 33]

They don't do what I wanted.  

If I type the value of *foo* into another lisp, it causes that other
lisp to have a binding for *foo* of 'hello.  If I type the Mathematica
value of foo, Hold[foo := hello],  into another Mathematica, I get back
  Hold[foo := hello]

but foo is still unbound.

~jrm
From: Jon Harrop
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <430e5840$0$97119$ed2619ec@ptn-nntp-reader03.plus.net>
Joe Marshall wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> Until you apply the Y combinator, at which point it becomes int -> int,
>> of course:
>>
>> # let rec f x = if x<2 then x else f(x-2)+f(x-1);;
>> val f : int -> int = <fun>
> 
> In this case, yes, but Y can also give you  f:int->_|_

I see, yes.

> Obviously your experience and the experience of Prof. Fateman differ.

If only you knew. :-)

> I have to say that *I* was surprised that Mathematica operated by
> `pattern match and rewrite until nothing changes'.  That particular
> idea is used in a number of computer applications, but it seems to be
> of most use in the domain of theorem proving and logic programming.  A
> big problem in those domains is demonstrating that your rule set makes
> sense.  Adding a correct rule to a correct rule set does not
> necessarily yield a new correct rule set.  The experts in the field
> acknowledge that it is very hard to reason about rule sets, so I would
> think that it would be extraordinarily difficult for a casual user to
> do so.

The difference is the phenomenal pedantry in hard-core subjects like those.
That is in stark contrast to the hacking about done by your average
scientist using Mathematica, who wouldn't know a formalism if it bit him
(or her) on the ass. At the end of the day, Mathematica is (and was
designed to be) complementary to conventional programming languages.

> It also seems to me to be far more likely that a casual Mathematica
> user has had experience with procedural languages such as C, Basic,
> Pascal, Fortran, or Java.  On the surface, the rule
> 
>   f[x_] := 1 + Sin[x]
> 
> really looks like a statement in one of those languages (I certainly
> would have thought it was).

Yes. However, you have to do very little before you realise that Mathematica
is obviously not the same as other languages. In conventional languages,
the variable "x" bound in the pattern would either have its type statically
inferred and checked to be the same as the argument to Sin[...] (typically
float), dynamically type checked to the same type or explicitly declared to
be of a particular type (which it clearly isn't) and then checked. But in
Mathematica you can pass the "function" f any expression, for example:

In[2]:= f[1+x]

Out[2]= 1 + Sin[1 + x]

So you immediately realise that this isn't the same as the other languages
that you know.

Many Mathematica programmers try to use Do loops and assignment, of course,
just as they do in Lisp, OCaml and so on. I actually advocated adding
functionality to criticise user's programs, automatically rewriting them in
a functional style where possible.

>> What's wrong with the definitions I gave:
>>
>> In[1]:= foo := Hold[foo := hello]
>>
>> In[2]:= foo
>>
>> Out[2]= Hold[foo := hello]
>>
>> In[3]:= aconstant = Hold[aconstant := 33]
>>
>> Out[3]= Hold[aconstant := 33]
> 
> They don't do what I wanted.
> 
> If I type the value of *foo* into another lisp, it causes that other
> lisp to have a binding for *foo* of 'hello.  If I type the Mathematica
> value of foo, Hold[foo := hello],  into another Mathematica, I get back
>   Hold[foo := hello]
> 
> but foo is still unbound.

I think that is just:

In[1]:= foo := (foo := hello)

In[2]:= foo

In[3]:= foo

Out[3]= hello

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Brian Downing
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <b9tPe.66044$084.25255@attbi_s22>
In article <·························@ptn-nntp-reader03.plus.net>,
Jon Harrop  <······@jdh30.plus.com> wrote:
> > They don't do what I wanted.
> > 
> > If I type the value of *foo* into another lisp, it causes that other
> > lisp to have a binding for *foo* of 'hello.  If I type the Mathematica
> > value of foo, Hold[foo := hello],  into another Mathematica, I get back
> >   Hold[foo := hello]
> > 
> > but foo is still unbound.
> 
> I think that is just:
> 
> In[1]:= foo := (foo := hello)
> 
> In[2]:= foo
> 
> In[3]:= foo
> 
> Out[3]= hello

No.

He wants to see a Mathematica variable contain exactly "foo := hello",
and then see it be able to output that exact value.  Not 
"Hold[foo := hello]", not "Unevaluated[foo := hello]", and not even
"HoldForm[foo := hello]" (which would look like "foo := hello", but not
be).

Of course Mathematica can not do this.  I think that is his point.

-bcd
-- 
*** Brian Downing <bdowning at lavos dot net> 
From: Jon Harrop
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <430e7fcf$0$1311$ed2619ec@ptn-nntp-reader02.plus.net>
Brian Downing wrote:
> He wants to see a Mathematica variable contain exactly "foo := hello",
> and then see it be able to output that exact value.
>
> Not "Hold[foo := hello]", not "Unevaluated[foo := hello]", and not even
> "HoldForm[foo := hello]" (which would look like "foo := hello", but not
> be).
>
> Of course Mathematica can not do this.  I think that is his point.

You do have something of equivalent functionality, I believe. So in Lisp
you have:

* (defvar foo (list 'defvar '*foo* ''hello))

FOO
* foo

(DEFVAR *FOO* 'HELLO)
* (eval foo)

*FOO*
* *foo*

HELLO
*

And in Mathematica you have:

In[1]:= foo := HoldForm[foo2 := hello]

In[2]:= foo

Out[2]= foo2 := hello

In[3]:= ReleaseHold[foo]

In[4]:= foo2

Out[4]= hello

In both cases: the variable "foo" contains an AST representing an
assignment; fetching "foo" returns the AST; evaluating the value of "foo"
assigns a new variable; the value of the new variable is another AST
containing the symbol "hello".

The difference, it seems to me, is purely notational and makes no difference
to the use of macros in Mathematica. Ultimately, what do you believe Lisp
can do that Mathematica cannot (and don't say print a quoted expression
without a quotation mark)?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Joe Marshall
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <irxs6bbh.fsf@ccs.neu.edu>
Jon Harrop <······@jdh30.plus.com> writes:

> Brian Downing wrote:
>> He wants to see a Mathematica variable contain exactly "foo := hello",
>> and then see it be able to output that exact value.
>>
>> Not "Hold[foo := hello]", not "Unevaluated[foo := hello]", and not even
>> "HoldForm[foo := hello]" (which would look like "foo := hello", but not
>> be).
>>
>> Of course Mathematica can not do this.  I think that is his point.
>
> You do have is something of equivalent functionality, I believe. So in Lisp
> you have:
>
> * (defvar foo (list 'defvar '*foo* ''hello))
>
> FOO
> * foo
>
> (DEFVAR *FOO* 'HELLO)
> * (eval foo)
>
> *FOO*
> * *foo*
>
> HELLO
> *
>
> And in Mathematica you have:
>
> In[1]:= foo := HoldForm[foo2 := hello]
>
> In[2]:= foo
>
> Out[2]= foo2 := hello
>
> In[3]:= ReleaseHold[foo]
>
> In[4]:= foo2
>
> Out[4]= hello

So could I do this?

   foo := HoldForm[foo := hello]


Actually, rather than go back and forth with you acting as my
Mathematica interpreter, is there a demo or free version available
anywhere so I can just type a few things at it?

~jrm
From: Brian Downing
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <WLFPe.308324$xm3.78435@attbi_s21>
In article <············@ccs.neu.edu>, Joe Marshall  <···@ccs.neu.edu> wrote:
> So could I do this?
> 
>    foo := HoldForm[foo := hello]

It doesn't really help you much.  HoldForm is defined to be invisible in
the standard output form:

In[1]:= foo := HoldForm[foo := hello]

In[2]:= foo

Out[2]= foo := hello

...but it's still there.

In[3]:= FullForm[foo]

Out[3]//FullForm= HoldForm[SetDelayed[foo, hello]]

(I don't know of any Mathematica repls available online, either.)

-bcd
-- 
*** Brian Downing <bdowning at lavos dot net> 
From: Richard Fateman
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <FCGPe.1045$dw4.209@newssvr29.news.prodigy.net>
You can download a trial mathematica from
http://www.wolfram.com/products/mathematica/trial.cgi

but I think that several people have discovered that
it may not be worth their time exploring the issue
in order to convince people (or is it just JH?)
that Mathematica is inconvenient to use for
manipulation of programs.

I HAVE a copy of Mathematica, and it has, by and large,
been correctly characterized by its critics; JH largely
defends it by mischaracterizing the criticisms. Sort
of like some politicians.

RJF

Brian Downing wrote:

> In article <············@ccs.neu.edu>, Joe Marshall  <···@ccs.neu.edu> wrote:
> 
>>So could I do this?
>>
>>   foo := HoldForm[foo := hello]
> 
> 
> It doesn't really help you much.  HoldForm is defined to be invisible in
> the standard output form:
> 
> In[1]:= foo := HoldForm[foo := hello]
> 
> In[2]:= foo
> 
> Out[2]= foo := hello
> 
> ...but it's still there.
> 
> In[3]:= FullForm[foo]
> 
> Out[3]//FullForm= HoldForm[SetDelayed[foo, hello]]
> 
> (I don't know of any Mathematica repls available online, either.)
> 
> -bcd
From: David Steuber
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <87irxrh8v2.fsf@david-steuber.com>
Joe Marshall <···@ccs.neu.edu> writes:

> Actually, rather than go back and forth with you acting as my
> Mathematica interpreter, is there a demo or free version available
> anywhere so I can just type a few things at it?

This probably doesn't qualify:

http://www.quickmath.com/

The site uses Mathematica to do its math.

-- 
My .sig file sucks.  Can anyone recommend a better one?
From: Joe Marshall
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <oe7j3x7h.fsf@ccs.neu.edu>
David Steuber <·····@david-steuber.com> writes:

> Joe Marshall <···@ccs.neu.edu> writes:
>
>> Actually, rather than go back and forth with you acting as my
>> Mathematica interpreter, is there a demo or free version available
>> anywhere so I can just type a few things at it?
>
> This probably doesn't qualify:
>
> http://www.quickmath.com/
>
> The site uses Mathematica to do its math.

Thanks.

I think I've decided to give up pursuing this point.  It's just not
that interesting.  Should anyone care, here is where I was going with
it.

Steele outlines the interpretations of the various combinations of
backquote and comma in CLTL.  I reproduce it here:

    Consider this set of sample values: 

    (setq p '(union x y)) 
    (setq q '((union x y) (list 'sqrt 9))) 
    (setq r '(union x y)) 
    (setq s '((union x y)))

    ``(foo ,,p) = (LIST 'LIST ''FOO P) 
     => (LIST 'FOO (UNION X Y)) 
     => (FOO (A B C))

    So ,,p means ``the value of p is a form; use the value of the value of p.'' 


    ``(foo ,,@q) = (LIST* 'LIST ''FOO Q) 
     => (LIST 'FOO (UNION X Y) (LIST 'SQRT 9)) 
     => (FOO (A B C) (SQRT 9))

    So ,,@q means ``the value of q is a list of forms; splice the list
    of values of the elements of the value of q.''  


    ``(foo ,',r) = (LIST 'LIST ''FOO (LIST 'QUOTE R)) 
     => (LIST 'FOO '(UNION X Y)) 
     => (FOO (UNION X Y))

    So ,',r means ``the value of r may be any object; use the value of
    r that is available at the time of first evaluation, that is, when
    the outer backquote is evaluated.'' (To use the value of r that is
    available at the time of second evaluation, that is, when the
    inner backquote is evaluated, just use ,r.)  


    ``(foo ,',@s) = (LIST 'LIST ''FOO (CONS 'QUOTE S)) 
     => (LIST 'FOO '(UNION X Y)) 
     => (FOO (UNION X Y))

    So ,',@s means ``the value of s must be a singleton list of any
    object; use the element of the value of s that is available at the
    time of first evaluation, that is, when the outer backquote is
    evaluated.'' Note that s must be a singleton list because it will
    be spliced into a form (quote ), and the quote special form
    requires exactly one subform to appear; this is generally true of
    the sequence ',@. (To use the value of s that is available at the
    time of second evaluation, that is, when the inner backquote is
    evaluated, just use ,@s,in which case the list s is not restricted
    to be singleton, or ,(car s).)  


    ``(foo ,@,p) = (LIST 'CONS ''FOO P) 
     => (CONS 'FOO (UNION X Y)) 
     => (FOO A B C)

    So ,@,p means ``the value of p is a form; splice in the value of
    the value of p.''  


    ``(foo ,@,@q) = (LIST 'CONS ''FOO (CONS 'APPEND Q)) 
     => (CONS 'FOO (APPEND (UNION X Y) (LIST 'SQRT 9))) 
     => (FOO A B C SQRT 9)

    So ,@,@q means ``the value of q is a list of forms; splice each of
    the values of the elements of the value of q, so that many
    splicings occur.''  


    ``(foo ,@',r) = (LIST 'CONS ''FOO (LIST 'QUOTE R)) 
     => (CONS 'FOO '(UNION X Y)) 
     => (FOO UNION X Y)

    So ,@',r means ``the value of r must be a list; splice in the
    value of r that is available at the time of first evaluation, that
    is, when the outer backquote is evaluated.'' (To splice the value
    of r that is available at the time of second evaluation, that is,
    when the inner backquote is evaluated, just use ,@r.)
 

    ``(foo ,@',@s) = (LIST 'CONS ''FOO (CONS 'QUOTE S)) 
     => (CONS 'FOO '(UNION X Y)) 
     => (FOO UNION X Y)

    So ,@',@s means ``the value of s must be a singleton list whose
    element is a list; splice in the list that is the element of the
    value of s that is available at the time of first evaluation, that
    is, when the outer backquote is evaluated.'' (To splice the
    element of the value of s that is available at the time of second
    evaluation, that is, when the inner backquote is evaluated, just
    use ,@(car s).)  


While I am sure that Mathematica can mimic many of these variations,
the `keep rewriting until it stops changing' rule is likely to be a
problem in the more esoteric ones.  From the look of it, Mathematica
has attempted to paper over the problematic ones with a plethora of
ad-hoc `quotation-like' and `unquote-like' forms:  Hold, HoldForm,
Unevaluated, Evaluate, Release, ReleaseHold, HoldComplete, etc.

Steele ends with this suggestion:

    I leave it to the reader to explore the possibilities of triply
    nested backquotes.

An interesting thing about triply nested backquote is that you can
sandwich an evaluation phase between two quoting phases or vice
versa.  Double-nested backquote doesn't have this property.  An ad-hoc
set of quote/unquote operators will probably not have the power to
handle this.  In particular, I suspect that nesting a single
evaluation step between two quoting phases will be difficult because
of Mathematica's extremely eager rewrite strategy. 

But as I said earlier, this is getting tedious.

~jrm
From: Jon Harrop
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <43133380$0$1293$ed2619ec@ptn-nntp-reader02.plus.net>
Joe Marshall wrote:
> I think I've decided to give up pursuing this point.  It's just not
> that interesting.  Should anyone care, here is where I was going with
> it.
> ...

Thanks for the examples. I'll try to translate these into Mathematica and
will report back when they're done.

> While I am sure that Mathematica can mimic many of these variations,
> the `keep rewriting until it stops changing' rule is likely to be a
> problem in the more esoteric ones.

I think your concern is based upon a misinterpretation of Mathematica's
evaluation strategy but, as you say, we're not going to resolve this
without more code and less talk.

> From the look of it, Mathematica 
> has attempted to paper over the problematic ones with a plethora of
> ad-hoc `quotation-like' and `unquote-like' forms:  Hold, HoldForm,
> Unevaluated, Evaluate, Release, ReleaseHold, HoldComplete, etc.

Yes. I don't know what those are all for.

> Steele ends with this suggestion:
> 
>     I leave it to the reader to explore the possibilities of triply
>     nested backquotes.
> 
> An interesting thing about triply nested backquote is that you can
> sandwich an evaluation phase between two quoting phases or vice
> versa.  Double-nested backquote doesn't have this property.  An ad-hoc
> set of quote/unquote operators will probably not have the power to
> handle this.  In particular, I suspect that nesting a single
> evaluation step between two quoting phases will be difficult because
> of Mathematica's extremely eager rewrite strategy.

I agree that this will probably be more difficult in Mathematica but not for
the reason that you give. I do not think the rewrite approach affects this.
However, Mathematica does not recurse into held expressions to evaluate
subexpressions within them. For example, this works:

* `,(+ 1 2)

3

In[1]:= Hold[Evaluate[1+2]]

Out[1]= Hold[3]

But this does not:

* `(+ ,(+ 1 2) 3)

(+ 3 3)

In[2]:= Hold[Evaluate[1+2]+3]

Out[2]= Hold[Evaluate[1 + 2] + 3]

So the easiest way to get the functionality of Lisp's "," is to hoist code:

In[3]:= With[{x=1+2}, Hold[x+3]]

Out[3]= Hold[3 + 3]

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Kent M Pitman
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <ubr3gr7ip.fsf@nhplace.com>
Jon Harrop <······@jdh30.plus.com> writes:

I've only been vaguely spot-checking this conversation, but this caught
my eye:

> > From the look of it, Mathematica 
> > has attempted to paper over the problematic ones with a plethora of
> > ad-hoc `quotation-like' and `unquote-like' forms:  Hold, HoldForm,
> > Unevaluated, Evaluate, Release, ReleaseHold, HoldComplete, etc.
> 
> Yes. I don't know what those are all for.

Doesn't Macsyma (just as an example) offer essentially the same
capability but under different names/syntax.  e.g., it has the
business of noun and verb forms of functions, and a number of
complicated control flags in its EV function (which is considerably
more finicky and meddlesome than EVAL).  Evaluation is treewalking,
and all the treewalkers I've ever seen either take lots of options or
do less than a fully general job.  Should anyone really be surprised
by an outcome that is replicated both in other math systems and in
symbolic language code/treewalkers?  A "plethora" of options is
unreasonable when the problem to be solved doesn't have a "plethora"
of situations...  Ad hoc?  Yes, well, the alternative is to make a single
one with too many options to be usable.  Sometimes people just want 
arbitrary choices made that they can use most of the time, even if those
choices are not canonical / universally dictated / uniquely determined.
See my EQUAL paper for a discussion of why I thought it wrong on some
level to put EQUAL function into Lisp.  Yet at the same time, I know
EQUAL is used a lot very successfully by people, and so its presence
makes sense at some other, more "ad hoc" level.

http://www.nhplace.com/kent/PS/EQUAL.html
From: Jon Harrop
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <43132c9a$0$1293$ed2619ec@ptn-nntp-reader02.plus.net>
Joe Marshall wrote:
> So could I do this?
> 
>    foo := HoldForm[foo := hello]

Yes. Mathematica will then replace the symbol "foo" with the held expression
"foo := hello", i.e. the substituted expression will not be evaluated until
ReleaseHold is applied to it.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Brian Downing
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <cuFPe.308308$xm3.192110@attbi_s21>
In article <························@ptn-nntp-reader02.plus.net>,
Jon Harrop  <······@jdh30.plus.com> wrote:
> You do have is something of equivalent functionality, I believe. So in Lisp
> you have:
[snip]
> And in Mathematica you have:
[snip]

> In both cases: the variable "foo" contains an AST representing an
> assignment; fetching "foo" returns the AST; evaluating the value of "foo"
> assigns a new variable; the value of the new variable is another AST
> containing the symbol "hello".
> 
> The difference, it seems to me, is purely notational and makes no difference
> to the use of macros in Mathematica. Ultimately, what do you believe Lisp
> can do that Mathematica cannot (and don't say print a quoted expression
> without a quotation mark)?

How do you do the following in Mathematica?

CL-USER> (defvar *foo* '(defvar *foo-2* 'hello))
*FOO*
CL-USER> (second *foo*)
*FOO-2*

i.e., extract the symbol that will be set from the unevaluated expression?

I believe it's possible, but it's certainly not obvious:

In[1]:= bar := a[b[c, d]]

In[2]:= bar[[1]][[1]]

Out[2]= c

In[3]:= foo := HoldForm[SetDelayed[foo2, hello]]

In[4]:= foo[[1]][[1]]

Part::partd: Part specification Null[[1]] is longer than depth of object.

Out[4]= Null[[1]]

Furthermore, the Lisp side didn't evaluate the expression, but
Mathematica did (obviously returning Null because of the error above):

CL-USER> (boundp '*foo-2*)
NIL

In[5]:= foo2

Out[5]= hello

Which leads me to believe there's nothing at all magic about
ReleaseHold[foo] -- foo[[1]] works just as well.

These kind of problems lead me to believe that working with unevaluated
expressions in Mathematica is much more touchy than the equivalent in
Lisp.  Since macros are nothing more than building up and manipulating
unevaluated expressions, I think this is meaningful.

A friend of mine even wrote a talk about this, "Working with Unevaluated
Expressions":

http://library.wolfram.com/infocenter/Conferences/377/

From the abstract:
"Since Mathematica automatically evaluates arguments and return values
of functions, building up a result without exposing intermediate stages
of work to the evaluator requires subtle techniques that even seasoned
Mathematica programmers sometimes find elusive."

-bcd
-- 
*** Brian Downing <bdowning at lavos dot net> 
From: Jon Harrop
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <430e7fd4$0$1311$ed2619ec@ptn-nntp-reader02.plus.net>
Joe Marshall wrote:
> If I type the value of *foo* into another lisp, it causes that other 
> lisp to have a binding for *foo* of 'hello.  If I type the Mathematica
> value of foo, Hold[foo := hello],  into another Mathematica, I get back
>   Hold[foo := hello]
> 
> but foo is still unbound.

In my analogy, the "value of foo" is a held expression and, therefore, is
equivalent to the expression inside the Hold[..] in Mathematica.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: ··············@hotmail.com
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <1125032336.226663.162570@f14g2000cwb.googlegroups.com>
Jon Harrop wrote:
> Joe Marshall wrote:
> > If I type the value of *foo* into another lisp, it causes that other
> > lisp to have a binding for *foo* of 'hello.  If I type the Mathematica
> > value of foo, Hold[foo := hello],  into another Mathematica, I get back
> >   Hold[foo := hello]
> >
> > but foo is still unbound.
>
> In my analogy, the "value of foo" is a held expression and, therefore, is
> equivalent to the expression inside the Hold[..] in Mathematica.

You have missed yet another point completely.

Joe Marshall, as far as I can tell, is making the point that in Lisp,
*actual code forms* can be passed around as *actual Lisp data,* without
getting grabbed and massacred by the evaluator every time it comes up
for air.

  Which means that Lisp macros can manipulate those code forms without
any gymnastics. And could, for instance, send those forms over the Net
to some other Lisp implementation to have it evaluated or incorporated
into another program, or whatever.

  Whereas, as you continue to demonstrate, Mathematica requires you to
box things up in Hold[] expressions in order for the pattern-defining
forms to be visible to some hypothetical Mathematica macro function.
Which, to a Lisp programmer, is an obvious pain in the ass: keep the
expression in the box, do all your expression surgery through the
opening in the top of the box, and pass your box up to the next macro,
which had better be expecting expressions to be within one, but only
one, layer of Hold[].
From: Nathan Baum
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <dedtns$ump$1@news6.svr.pol.co.uk>
Joe Marshall wrote:
> But then again, you might not be defining a function at all.
> The fibonacci function can be mathematically defined with this
> recurrence relationship:
> 
>       F(n) = F(n+1) - F(n-1)
> 
> but that definition won't work in a computer.

I don't think that definition will work in a human, either. You'd either 
have to explicitly specify the value of F(n) for two values of n, or use 
it to test if a known sequence fits the relationship.

For example, a human could determine that the sequence { .. 8, -4, 4, 0, 
4, 4, 8 .. } satisfied that relationship. But that isn't part of the 
Fibonacci sequence.

I think a computer could test a sequence against this relationship, and 
generate a sequence from it given definitions of F(n) for two values of n.
From: Pascal Bourguignon
Subject: Re: Very poor Lisp performance / about Mathematica
Date: 
Message-ID: <87mzn95t36.fsf@thalassa.informatimago.com>
Nathan Baum <···········@btinternet.com> writes:

> Joe Marshall wrote:
>> But then again, you might not be defining a function at all.
>> The fibonacci function can be mathematically defined with this
>> recurrence relationship:
>>       F(n) = F(n+1) - F(n-1)
>> but that definition won't work in a computer.

In a symbolic program, it works perfectly.


> I don't think that definition will work in a human, either. You'd
> either have to explicitly specify the value of F(n) for two values of
> n, or use it to test if a known sequence fits the relationship.
>
> For example, a human could determine that the sequence { .. 8, -4, 4,
> 0, 4, 4, 8 .. } satisfied that relationship. But that isn't part of
> the Fibonacci sequence.
>
> I think a computer could test a sequence against this relationship,
> and generate a sequence from it given definitions of F(n) for two
> values of n.

Since the Fibonacci function is named it's obvious that Joe only gives us a diff.

Original:
    Fibonacci: F(0)=1
               1=F(1)
               F(n)=F(n-1)+F(n-2) ; recurrence relationship.

Joe diffs:     F(n) = F(n+1) - F(n-1) ; recurrence relationship.

Result of the patch:

    Fibonacci: F(0)=1
               1=F(1)
               F(n)=F(n+1)-F(n-1) ; recurrence relationship.

Since both recurrence relationships are strictly equivalent, we get the
same function and the same results with any human (let's define a
human as a mammal able to do it).

It's not too difficult for a program to see this equivalence either,
so it's obvious computers equipped with the right program can do it too.

-- 
"Our users will know fear and cower before our software! Ship it!
Ship it and let them flee like the dogs they are!"
From: Jens Axel Søgaard
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430622f9$0$25985$edfadb0f@dread12.news.tele.dk>
Brian Downing wrote:

> The problem, as mentioned in another branch of the thread here, is that
> the Mathematica syntax is incredibly complicated to support some of this
> flexibility, and the Mathematica evaluator is so /incredibly/ complex as
> to basically prohibit understanding of what's going to happen when
> things are evaluated, at least for me.  

The evaluation rules of Mathematica are indeed scary. The "Standard
Evaluation Sequence" consists of 12 (twelve) points:

<http://documents.wolfram.com/v5/TheMathematicaBook/MathematicaReferenceGuide/Evaluation/A.4.1.html>

and then there is "Non-Standard Argument Evaluation" too.

-- 
Jens Axel Søgaard
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mmleeF178cmdU1@individual.net>
Jon Harrop wrote:
> Ulrich Hobelmann wrote:
>> Jon Harrop wrote:
>>> Joe Marshall wrote:
>>>>      Infix macros are *hard*.
>>> Do you mean it is difficult to implement infix in Lisp?
>> No, others mentioned that *there are* infix parsers (reader macros) for
>> Lisp.  Most people don't use them I guess :D
> 
> Then why are they "hard"? Perhaps I misunderstood Joe. I thought he meant it
> was difficult to implement infix operators in Lisp. Now I'm thinking maybe
> he meant it is easy to implement infix operators but it is then difficult
> to write macros that use infix syntax?

It's difficult to integrate infix syntax into Lisp.  Maybe the best 
possibility would be to keep the sexp form with its () and whitespace 
conventions, but keep them as lists to convert to prefix form.  That way 
you could still use macros etc.  But again: with n-ary operators prefix 
makes more sense, and even C, ML, Python ... use prefix for everything 
except math and iteration constructs (i.e. function calls).  Lisp macros 
for converting infix-math to Lisp exist, but it's not fun to do normal 
programming that way.

> Presumably that is a Lisp-specific problem because Mathematica has no
> problem using infix notation...

Maybe because math is different.  I don't know about Mathematica's 
macros either...

>>>>      You need to learn the prefix notation *anyway* (because code that
>>>>      operates on code needs to operate at the abstract syntax level,
>>>>      which in lisp is naturally prefix-notated lists).
>>> So Lisp is rather tied to the built-in prefix notation.
>> Not at all.  Implement whatever syntax you want.  If you don't like Lisp
>> syntax at all use a complete, different syntax (and language) like Dylan.
> 
> So you disagree with Joe saying that "lisp is naturally prefix-notated
> lists". You believe that prefix/infix/postfix makes no difference in Lisp?

No, the language is prefix, but you can easily use (but not integrate) 
something else in it.  Integrating infix with macros and functions in 
Lisp would be just as hard as integrating macros into other languages 
(see M4, CPP).

>> True, an experienced C coder probably doesn't make syntax mistakes, or
>> just ()s the code to be sure.  But I feel like in a straight-jacket when
>> coding C.  Not because of memory management, but because of syntax (like
>> the inability to use if and switch as an expression, the need to create
>> a named function in order to pass it as a parameter, the awkward syntax
>> for creating structures that I want to pass as arguments...).
> 
> Yes. To be fair, you are trying to do functional programming in an
> imperative language. Most people do the converse, trying to do imperative
> programming in a functional language. :-)

I'm not even good at FP, I'd say.  I don't really use combinator 
functions, just a function as parameter once in a while, some 
recursion...  But everything just when it's appropriate.  It's sad that 
C gets in the way when you try to express compact, straightforward 
algorithms in it.  Java is hardly better.

-- 
I believe in Karma.  That means I can do bad things to people
all day long and I assume they deserve it.
	Dogbert
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4306223f$0$1300$ed2619ec@ptn-nntp-reader02.plus.net>
Ulrich Hobelmann wrote:
> It's difficult to integrate infix syntax into Lisp.  Maybe the best
> possibility would be to keep the sexp form with its () and whitespace
> conventions, but keep them as lists to convert to prefix form.  That way
> you could still use macros etc.  But again: with n-ary operators prefix
> makes more sense, and even C, ML, Python ... use prefix for everything
> except math and iteration constructs (i.e. function calls).

No. OCaml uses infix for lists (h :: t), strings (s1^s2), most function
calls (e.g. "f a b" but not "a +| b"). In SML, you can define arbitrary
infix operators with their own precedences and associativities. This is
done in the SML implementation of my ray tracer, for vector operators, for
example.

The OCaml-way is easier to implement (functions with symbol names are
implicitly infix). The SML way is harder to implement but more powerful for
the user. However, it is argued that custom infix operators puncture
modularity, e.g. it is no longer clear what "+" does if you replace it.

> Lisp macros 
> for converting infix-math to Lisp exist, but it's not fun to do normal
> programming that way.

You mean it isn't fun to code everything in Lisp via an infix-math macro?

>> Presumably that is a Lisp-specific problem because Mathematica has no
>> problem using infix notation...
> 
> Maybe because math is different.  I don't know about Mathematica's
> macros either...

Well, infix notation works well for all sorts of non-math stuff as well,
like lists.

>> So you disagree with Joe saying that "lisp is naturally prefix-notated
>> lists". You believe that prefix/infix/postfix makes no difference in
>> Lisp?
> 
> No, the language is prefix, but you can easily use (but not integrate)
> something else in it.  Integrating infix with macros and functions in
> Lisp would be just as hard as integrating macros into other languages
> (see M4, CPP).

Yes. I see.

>> Yes. To be fair, you are trying to do functional programming in an
>> imperative language. Most people do the converse, trying to do imperative
>> programming in a functional language. :-)
> 
> I'm not even good at FP, I'd say.  I don't really use combinator
> functions, just a function as parameter once in a while, some
> recursion...  But everything just when it's appropriate.  It's sad that
> C gets in the way when you try to express compact, straightforward
> algorithms in it.  Java is hardly better.

Even sadder, it wouldn't be difficult to add that functionality to those
languages...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mmnivF16u43eU1@individual.net>
Jon Harrop wrote:
> Ulrich Hobelmann wrote:
>> It's difficult to integrate infix syntax into Lisp.  Maybe the best
>> possibility would be to keep the sexp form with its () and whitespace
>> conventions, but keep them as lists to convert to prefix form.  That way
>> you could still use macros etc.  But again: with n-ary operators prefix
>> makes more sense, and even C, ML, Python ... use prefix for everything
>> except math and iteration constructs (i.e. function calls).
> 
> No. OCaml uses infix for lists (h :: t), strings (s1^s2)

Admitted, the pattern notation for lists is quite nice, and usually 
shorter than Lisp.

>>> Yes. To be fair, you are trying to do functional programming in an
>>> imperative language. Most people do the converse, trying to do imperative
>>> programming in a functional language. :-)
>> I'm not even good at FP, I'd say.  I don't really use combinator
>> functions, just a function as parameter once in a while, some
>> recursion...  But everything just when it's appropriate.  It's sad that
>> C gets in the way when you try to express compact, straightforward
>> algorithms in it.  Java is hardly better.
> 
> Even sadder, it wouldn't be difficult to add that functionality to those
> languages...

I sometimes wonder what the best way to code low-level is:
  * use something like Chicken Scheme (i.e. a Scheme compiler that 
produces C code)
  * write an sexp-syntax language that somehow creates C code with 
labels and gotos ;)
  * create a compiler (to overcome C's stupid calling conventions, 
unless when I need them for FFI)

The second might be easiest to interface with C.

-- 
I believe in Karma.  That means I can do bad things to people
all day long and I assume they deserve it.
	Dogbert
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43062b97$0$22943$ed2619ec@ptn-nntp-reader01.plus.net>
Ulrich Hobelmann wrote:
> I sometimes wonder what the best way to code low-level is:
>   * use something like Chicken Scheme (i.e. a Scheme compiler that
> produces C code)
>   * write an sexp-syntax language that somehow creates C code with
> labels and gotos ;)
>   * create a compiler (to overcome C's stupid calling conventions,
> unless when I need them for FFI)
> 
> The second might be easiest to interface with C.

That's a fascinating suggestion. I might just try my hand at this. Sounds
like a good first Lisp project. :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: André Thieme
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <deb7ud$aa4$2@ulric.tng.de>
Ulrich Hobelmann schrieb:

>> No. OCaml uses infix for lists (h :: t), strings (s1^s2)
> 
> 
> Admitted, the pattern notation for lists is quite nice, and usually 
> shorter than Lisp.

Write a pattern matcher in Lisp. From then on you can use a cool OCaml 
feature in Lisp too.
Writing a full blown pattern matcher is not trivial.


André
-- 
From: Marco Antoniotti
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <M7qOe.56$DJ5.70033@typhoon.nyu.edu>
André Thieme wrote:
> Ulrich Hobelmann schrieb:
> 
>>> No. OCaml uses infix for lists (h :: t), strings (s1^s2)
>>
>>
>>
>> Admitted, the pattern notation for lists is quite nice, and usually 
>> shorter than Lisp.
> 
> 
> Write a pattern matcher in Lisp. From then on you can use a cool OCaml 
> feature in Lisp too.
> Writing a full blown pattern matcher is not trivial.

Shameless plug!

http://common-lisp.net/projects/cl-unification :)

Cheers
--
Marco
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3wtmka2q4.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> I heard that infix parsers are rarely used not because infix is worse but
> because infix is unusual in Lisp code and increases incompatibility. Would
> you agree with that?

No, I would not.  I would say that infix is used (and should be used)
in those cases where it actually is a better match to the domain being
addressed.  That said, I would tend to believe that infix is rarely
used in Lisp because it is largely irrelevant to most domains (those
traditionally targeted by Lisp or not)


> >> There is unquestionably a huge amount of evidence to the contrary. Most
> >> natural and programming languages have complicated grammars precisely
> >> because it simplifies their use and makes them easier to understand.
> > 
> > Would you please point us to evidence in this direction?
> 
> Firstly, do you agree that languages are evolving to be more concise?

Which languages?  The some of the most popular _programming_ languages
certainly are not (C++, Java, C#, VB, etc.).  There are others that
are (Perl, Arc, ...)

> Secondly, do you agree that more concise languages tend to have more
> complicated grammars?

No.  Arc is an obvious counter example.

> Finally, what other reason could drive this association?

First, there appears to be little or no evidence _for_ the claimed
association.  Second, why on earth would you think that would be the
"only" reason


> _I believe_ that languages are evolving to be more concise and to have more
> complicated grammars. _I can see_ no reason for complicating grammars unless
> it aids brevity/elegance/comprehensibility. So I see the evolution of
> natural and programming languages as a huge amount of evidence that
> complicated grammars are used to simplify the use of languages.
>
> _IMHO_, humans are very good at deciphering expressions written in
> complicated grammars, and this is why we make things easier for
> ourselves by complicating grammars.

Emphasis mine.  OK, you call this "evidence"?  It is pretty clear that
it is just opinion based on some vague intuitions.  When I think of
evidence in this sort of context, I mean real scientific studies,
backed by reasonably solid experiments (the sort of stuff that CogSci
folks do).  Do you have any references to some examples of such
supporting your claim?


> In particular, we are better at understanding many short expressions

Sort of, _but_

> written in the context of a complicated grammar, rather than many
> long expressions written with a very simple grammar.

this is at best a non sequitur and more than likely just plain
incorrect.  You appear to be all confused and thinking that high
expressivity (which is what I believe you are really trying to get at)
_implies_ complicated syntax.  This is definitely incorrect.

The key to high expressivity is having a close match between the
syntax of expression and the _meaning_ (semantics) being conveyed.
This is why all domains have their own "lingo" and "idioms" and such,
i.e., their own "sublanguage" that closely mirrors (in structure as
well as terminology) the content and behavior exhibited in/by the
domain, thereby promoting concise descriptions.  None of this implies
or requires complex syntax and complex syntax is almost certainly as
bad a thing here as elsewhere.  Just think of C++, you obviously don't
_need_ all that complexity to express any given program even with the
same level of efficiency.  It's there for reasons having nothing to do
with expressivity and arguably one of the worst things about the
language.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Hartmann Schaffer
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <54AMe.1689$Dd.7285@newscontent-01.sprint.ca>
Jon Harrop wrote:
> ...
> There is unquestionably a huge amount of evidence to the contrary.

i have my doubts

> Most
> natural and programming languages have complicated grammars precisely
> because it simplifies their use and makes them easier to understand.

if you believe chomsky, the human brain is hardwired for a certain style 
of grammar (at least that's what i read somewhere), so natural languages 
probably have a syntax that goes along with this hardwiring.  it 
might be complicated to describe formally, but isn't for people's 
everyday needs.  if you go beyond that, things begin to look different. 
  the mathematical notation (also a kind of syntax) was developed 
because natural language is woefully inadequate to describe mathematical 
problems without support of a more concise notation

> Additionally (pun intended), we were all taught operator precedences in
> conventional mathematics at a very young age.

as far as i know, lukasiewicz invented the polish notation long before 
computers were invented (according to wikipedia in 1920).  i couldn't 
find anything about his motivation, but wouldn't be surprised that it 
had something to do with problems with the conventional notation once 
you begin to add operators and precedence levels

> It seems at best odd and at
> worst stupid to disregard this.

that would depend on whether the conventional notation approaches its 
limitations or not

hs
From: [Invalid-From-Line]
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <87zmrhm775.fsf@kafka.homenet>
> as far as i know, lukasiewicz invented the polish notation long before
> computers were invented (according to wikipedia in 1920).  i couldn't
> fins anything about his motivation

He appears to have been working on multivalued logics and
wanted to avoid parentheses.
From: Pascal Bourguignon
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <87pssdctbc.fsf@thalassa.informatimago.com>
Jon Harrop <······@jdh30.plus.com> writes:
> Additionally (pun intended), we were all taught operator precedences in
> conventional mathematics at a very young age. It seems at best odd and at
> worst stupid to disregard this.

In mathematics, there are two precedence levels ({+,-} vs. {*,/} (and
even, division is usually written as an horizontal bar, that is
essentially just a big parenthesis).  In C there are 28 levels.  Do you
know a lot of C programmers who know the exact precedence levels of
each of C operators?  I knew those of Wirth's Pascal, but never those
of C: it was easier to use a lot of parentheses in C.

-- 
__Pascal Bourguignon__                     http://www.informatimago.com/
Our enemies are innovative and resourceful, and so are we. They never
stop thinking about new ways to harm our country and our people, and
neither do we. -- Georges W. Bush
From: M Jared Finder
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <Ba2dndWLxNpeWJ_eRVn-3Q@speakeasy.net>
Pascal Bourguignon wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
> 
>>Additionally (pun intended), we were all taught operator precedences in
>>conventional mathematics at a very young age. It seems at best odd and at
>>worst stupid to disregard this.
> 
> 
> In mathematics, there are two precedence levels ({+,-} vs. {*,/} (and
> even, division is usually written as an horizontal bar, that is
> essentially just a big parenthesis).  In C there are 28 levels.  Do you
> know a lot of C programmer who know the exact precendence levels of
> each of C operators?  I knew those of Wirth's Pascal, but never those
> of C: it was easier to use a lot of parentheses in C.

C's precedence is easy!  Just remember:

C's precedence does exactly what you want.  Except when it doesn't.

(Especially with regard to a = b == c.)

   -- MJF
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124280842.358576.191810@g47g2000cwa.googlegroups.com>
Pascal Bourguignon wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
> > Additionally (pun intended), we were all taught operator precedences in
> > conventional mathematics at a very young age. It seems at best odd and at
> > worst stupid to disregard this.
>
> In mathematics, there are two precedence levels ({+,-} vs. {*,/} (and
> even, division is usually written as an horizontal bar, that is
> essentially just a big parenthesis).  In C there are 28 levels.  Do you
> know a lot of C programmer who know the exact precendence levels of
> each of C operators?  I knew those of Wirth's Pascal, but never those
> of C: it was easier to use a lot of parentheses in C.

For all normal intents and purposes there are 20 levels of precedence
in C.  C programs only use some of them to make their code
understandable.  But the situation is still far from ideal.

Regarding teaching precedence in School.  If I was taught it I don't
remember it.  I learnt it in the first year of my degree from the shop
assistant selling me a graphical calculator.
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mgaqiF15mgq3U7@news.dfncis.de>
Pascal Bourguignon <····@mouse-potato.com> wrote:

>In mathematics, there are two precedence levels ({+,-} vs. {*,/} (and
>even, division is usually written as an horizontal bar, that is
>essentially just a big parenthesis).  In C there are 28 levels.  Do you
>know a lot of C programmer who know the exact precendence levels of
>each of C operators?  I knew those of Wirth's Pascal, but never those
>of C: it was easier to use a lot of parentheses in C.

Plus, part of the precedence is erratic, as even BWK admits. On
*BSD, there's an operator(7) manpage, which is very handy for cases
when you're puzzling about precedence.

mkb.
From: George Neuner
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <0js8g1pl0ftc2rkinjha0sdm6rbes6h74e@4ax.com>
On Wed, 17 Aug 2005 02:15:19 +0200, Pascal Bourguignon
<····@mouse-potato.com> wrote:

>In C there are 28 [precedence] levels.


I count 17 levels in C

17:   []     subscripting
      ()     function call
      .      
      ->     

16:   ++ --  postfix

15:   ++ --  prefix
      +  -   unary 
      &      address of
      *      dereference
      ~ 
      ! 
      sizeof

14:   ()     cast

13:   * / %

12:   + -

11:   << >>

10:   < > <= >=

 9:   == !=

 8:   &

 7:   ^

 6:   |

 5:   &&

 4:   ||

 3:   ?:

 2:   = += -= *= /= %= <<= >>= &= ^= |=

 1:   comma



C++ also has 17 levels, but changes the grouping just enough to
potentially screw up porting under-parenthesized C code.

17:   ::     scope resolution

16:   []     subscripting
      ()     function call
      ()     value construction
      .      direct selection
      ->     indirect selection
      sizeof


15:   ++ --  post and pre
      + -    unary 
      ~ 
      ! 
      &      address of
      *      dereference
      ()     cast
      new
      delete
      delete []

14:   .*     member section
      ->*    member section


Levels 1 through 13 are the same as for C.

 

Just curious ... where do you find 28 levels?


George
--
for email reply remove "/" from address
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mjgi7F16t999U2@news.dfncis.de>
George Neuner <·········@comcast.net> wrote:

>I count 17 levels in C
[...]

I see 15:

OPERATOR(7)        FreeBSD Miscellaneous Information Manual        OPERATOR(7)

NAME
     operator -- C operator precedence and order of evaluation

DESCRIPTION
           Operator                             Associativity
           --------                             -------------
           () [] -> .                           left to right
           ! ~ ++ -- - (type) * & sizeof        right to left
           * / %                                left to right
           + -                                  left to right
           << >>                                left to right
           < <= > >=                            left to right
           == !=                                left to right
           &                                    left to right
           ^                                    left to right
           |                                    left to right
           &&                                   left to right
           ||                                   left to right
           ?:                                   right to left
           = += -= *= /= %= <<= >>= &= ^= |=    right to left
           ,                                    left to right

FILES
     /usr/share/misc/operator

FreeBSD 5.4                    January 22, 2003                    FreeBSD 5.4

mkb.
From: George Neuner
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <0vh9g1hckbaldqhqf0sbmmspvobth614pv@4ax.com>
On 18 Aug 2005 13:25:28 GMT, Matthias Buelow <···@incubus.de> wrote:

>George Neuner <·········@comcast.net> wrote:
>
>>I count 17 levels in C
>[...]
>
>I see 15:
>
>OPERATOR(7)        FreeBSD Miscellaneous Information Manual        OPERATOR(7)
>
>NAME
>     operator -- C operator precedence and order of evaluation
>
>DESCRIPTION
>           Operator                             Associativity
>           --------                             -------------
>           () [] -> .                           left to right
>           ! ~ ++ -- - (type) * & sizeof        right to left
>           * / %                                left to right
>           + -                                  left to right
>           << >>                                left to right
>           < <= > >=                            left to right
>           == !=                                left to right
>           &                                    left to right
>           ^                                    left to right
>           |                                    left to right
>           &&                                   left to right
>           ||                                   left to right
>           ?:                                   right to left
>           = += -= *= /= %= <<= >>= &= ^= |=    right to left
>           ,                                    left to right
>
>FILES
>     /usr/share/misc/operator
>
>FreeBSD 5.4                    January 22, 2003                    FreeBSD 5.4
>
>mkb.


K&R lists the same 15.

Steele's "C - A Reference Manual", 3rd Ed. lists 17 ... the table on
page 167 breaks out typecasting and postfix ++/-- into separate
levels.

Stroustrup's "The C++ Programming Language", 2nd Ed. also lists 17
levels (different from C's) in the table on pages 89-90.  The 3rd Ed.
lists 18 in the table on page 120-121, inserting "throw" at level 2
between comma and ternary operators.


I seem to have mislaid my paper copy of the C standard.  However, I do
have the C++ standard - ISO/IEC 14882:1998.  Associativity for each
operator is mentioned in the text but a quick search finds no tables
of operators.  Searching for "precedence" yields only a few items, one
of which, [Section 5: Expressions, footnote 53 on page 63], says "The
precedence of operators is not directly specified, but it can be
derived from the syntax."

I can't recall now whether the C standard directly specified the
precedence relationships either.  In any event, 15 vs 17 levels is not
a significant difference - if the levels are derived from syntax the
difference could be the result of tweakings over time.   Nor I think
are the distinctions between the K&R and Steele versions really
noteworthy.  Syntactically I'm not sure how to even go about testing
them.

Anyway, Pascal wrote in his post that C had 28 precedence levels and I
wondered where he got *that* number from.

George
--
for email reply remove "/" from address
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mk0jlF17c82hU2@news.dfncis.de>
Stefan Ram <···@zedat.fu-berlin.de> wrote:

>  I have learnt the following expression as an example for a
>  C++-expression that can not be understood using precedence
>  alone.
>
>a = b < c ? d = e : f = g

Why not?

>  Here the grammar is needed to find the correct interpretation,
>  which is supposed to be
>
>a =(( b < c )?( d = e ):( f = g ))

Of course. It's consistent with the precedence rules. I can't see
the problem. Where would the first line be ambiguous?

mkb.
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mk43rF17aac2U1@news.dfncis.de>
Stefan Ram <···@zedat.fu-berlin.de> wrote:

>  If "=" would be weaker than "?:", it might be read as:
>a =(( b < c ? d = e : f )= g )

Hmm, ok, that's true.

mkb.
From: Pascal Bourguignon
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <8764u39xci.fsf@thalassa.informatimago.com>
George Neuner <·········@comcast.net> writes:
> On Wed, 17 Aug 2005 02:15:19 +0200, Pascal Bourguignon
> <····@mouse-potato.com> wrote:
>>In C there are 28 [precedence] levels.
> I count 17 levels in C
> Just curious ... where do you find 28 levels?

It doesn't make a qualitative difference, I cannot remember more
easily 17 than 28 levels.

-- 
"By filing this bug report you have challenged the honor of my
family. Prepare to die!"
From: George Neuner
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <a1o9g113a3tgqdlua0ilfs73ngvsm5u3v4@4ax.com>
On Thu, 18 Aug 2005 15:41:01 +0200, Pascal Bourguignon
<····@mouse-potato.com> wrote:

>George Neuner <·········@comcast.net> writes:
>> On Wed, 17 Aug 2005 02:15:19 +0200, Pascal Bourguignon
>> <····@mouse-potato.com> wrote:
>>>In C there are 28 [precedence] levels.
>> I count 17 levels in C
>> Just curious ... where do you find 28 levels?
>
>It doesn't make a qualitative difference, I cannot remember more
>easily 17 than 28 levels.

OK.  I just wondered if there was some enumeration I was unaware of.

George
--
for email reply remove "/" from address
From: Förster vom Silberwald
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124271889.844237.91600@g44g2000cwa.googlegroups.com>
Jon Harrop wrote:

> For example, nobody in their right mind would consider writing production
> code in Whitespace or Brainf*** because they are clearly less readable and
> maintainable, even though it is subjective. That is a clear-cut case, but
> with Lisp vs ML it is not so simple, IMHO.

That is not true if we are not talking at cross points. The Clean
langauge uses whitespace-formatting (it resembles a lot Python).
However, I felt never impeded by the Clean language as opposed to
Python. The Clean language had one of the best syntax I know of.

Schneewittchen
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124299075.429000.191190@g44g2000cwa.googlegroups.com>
Jon Harrop wrote:
> Joe Marshall wrote:
> > Is readability simply a subjective measure, then?  If so, and if
> > maintainability is about how easily a human can parse it, then
> > maintainability is also a subjective measure (and not particularly
> > interesting for comparing computer languages).
>
> Yes. Readability and maintainability are inherently subjective. However,
> they are both very important when comparing computer languages.
>
> For example, nobody in their right mind would consider writing production
> code in Whitespace or Brainf*** because they are clearly less readable and
> maintainable, even though it is subjective. That is a clear-cut case, but
> with Lisp vs ML it is not so simple, IMHO.

Yep, it isn't.

> > It seems likely to me that languages that require complex parsers are
> > harder for humans to understand as well.
>
> There is unquestionably a huge amount of evidence to the contrary. Most
> natural and programming languages have complicated grammars precisely
> because it simplifies their use and makes them easier to understand.

That's probably true.
The question is though: Are the computer languages that have complex
grammars easier to understand than the ones without them?

For example, although the grammar of C++ is quite complex I don't think
the complexity particularly helps the readability.

Most language designers who put complex syntax in their languages are
attempting to improve readability, but in my opinion most of them aren't
succeeding.

> Additionally (pun intended), we were all taught operator precedences in
> conventional mathematics at a very young age. It seems at best odd and at
> worst stupid to disregard this.

I wasn't :)

Operator precedence is confusing in general, and worse in programming
languages than in mathematics.  For example, on comp.lang.c, someone
said:-

INT_MAX - 4 + 3

.. invokes undefined behaviour.  It doesn't because associativity is
left to right in C.  This would not be surprising were it a C beginner
who made this mistake, but it wasn't, it was someone extremely
knowledgeable in the language.

See:
http://groups.google.co.uk/group/comp.lang.c/msg/3b79d35564ae78ab?hl=en&
http://groups.google.co.uk/group/comp.lang.c/msg/8f408bcade6b366a?hl=en&
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4q9oa8p1.fsf@ccs.neu.edu>
Jon Harrop <······@jdh30.plus.com> writes:

> Joe Marshall wrote:
>> Is readability simply a subjective measure, then?  If so, and if
>> maintainability is about how easily a human can parse it, then
>> maintainability is also a subjective measure (and not particularly
>> interesting for comparing computer languages).
>
> Yes. Readability and maintainability are inherently subjective. However,
> they are both very important when comparing computer languages.
>
> For example, nobody in their right mind would consider writing production
> code in Whitespace or Brainf*** because they are clearly less readable and
> maintainable, even though it is subjective. That is a clear-cut case, but
> with Lisp vs ML it is not so simple, IMHO.

I would point to Whitespace, Brainf***, Unlambda, Intercal, and
machine code as evidence that readability has an objective component.
Who could deny that `clear-screen' is more readable than `21 00 43 11
20 80 01 00 3D ED B0'

>> It seems likely to me that languages that require complex parsers are
>> harder for humans to understand as well.
>
> There is unquestionably a huge amount of evidence to the contrary. Most
> natural and programming languages have complicated grammars precisely
> because it simplifies their use and makes them easier to understand.

I disagree with your conclusion.

Most natural language have complicated grammars because it increases
the bandwidth and decreases the error rate in spoken communication.
Languages with very complex grammars (like Navajo) take much longer to
learn than those with simple grammars (like Esperanto).  If the more
complicated grammar is simpler to use and easier to understand, then
the opposite should be the case.

Programming languages with complex grammars have proven quite
difficult to use and understand.  Most computer languages have (more
or less) context-free grammars.  Computer languages that are
context-sensitive turn out to be hard to understand and hard to use.
As an obvious example, consider C++.  (Ed Willink gives these
examples.) 

   int(x), y, *const z;

   int(x), y, new int;

   int(x), y, z = 0;

The meaning of the `int(x)' subcomponent cannot be determined until
you parse the meaning of what is to the right of the y.

> Additionally (pun intended), we were all taught operator precedences in
> conventional mathematics at a very young age. 

I think this is the root of the issue.  Between standard algebraic
forms and the way computer languages have attempted to mimic them,
people get used to parsing these.

> It seems at best odd and at worst stupid to disregard this.

I don't disregard it, but there are *many* things we get taught at a
young age that are at best suboptimal and at worst wrong and
dangerous.  It isn't considered odd to try to remedy this.

~jrm
From: Peter Seibel
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m2zmrgwl5n.fsf@gigamonkeys.com>
Joe Marshall <···@ccs.neu.edu> writes:

> I would point to Whitespace, Brainf***, Unlambda, Intercal, and
> machine code as evidence that readability has an objective component.
> Who could deny that `clear-screen' is more readable than `21 00 43 11
> 20 80 01 00 3D ED B0'

Well, that depends on what 'clear-screen' actually does, doesn't it. ;-)

-Peter

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Hartmann Schaffer
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <tRTMe.1810$Dd.7709@newscontent-01.sprint.ca>
Joe Marshall wrote:
> ...
> Most natural language have complicated grammars because it increases
> the bandwidth and decreases the error rate in spoken communication.
> Languages with very complex grammars (like Navajo) take much longer to
> learn than those with simple grammars (like Esperanto).

is that also true for native speakers?  i.e. does it take navajo 
children longer to learn to speak?

> ...

hs
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <pssb6xcs.fsf@ccs.neu.edu>
Hartmann Schaffer <··@hartmann.schaffernet> writes:

> Joe Marshall wrote:
>> ...
>> Most natural language have complicated grammars because it increases
>> the bandwidth and decreases the error rate in spoken communication.
>> Languages with very complex grammars (like Navajo) take much longer to
>> learn than those with simple grammars (like Esperanto).
>
> is that also true for native speakers?  i.e. does it take navajo
> children longer to learn to speak?

I'm sure that Navajo kids start speaking at the same age as everyone
else.  It takes children several years to become fluent in their
native language (it takes them 3 full years to reach the proficiency
of a 3 year old, right?)  
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mk276F172p79U2@individual.net>
Joe Marshall wrote:
> Hartmann Schaffer <··@hartmann.schaffernet> writes:
> 
>> Joe Marshall wrote:
>>> ...
>>> Most natural language have complicated grammars because it increases
>>> the bandwidth and decreases the error rate in spoken communication.
>>> Languages with very complex grammars (like Navajo) take much longer to
>>> learn than those with simple grammars (like Esperanto).
>> is that also true for native speakers?  i.e. does it take navajo
>> children longer to learn to speak?
> 
> I'm sure that Navajo kids start speaking at the same age as everyone
> else.  It takes children several years to become fluent in their
> native language (it takes them 3 full years to reach the proficiency
> of a 3 year old, right?)  

I guess children still use plenty of wrong declension/conjugation.  A 
simpler language (I read book tomorrow) prevents making mistakes, while 
forcing a stricter word order (in Latin you can reorder pretty much 
everything you want, since every word has a case).

-- 
I believe in Karma.  That means I can do bad things to people
all day long and I assume they deserve it.
	Dogbert
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124439998.291609.191700@g44g2000cwa.googlegroups.com>
Ulrich Hobelmann wrote:
> Joe Marshall wrote:
> > Hartmann Schaffer <··@hartmann.schaffernet> writes:
> >
> >> Joe Marshall wrote:
> >>> ...
> >>> Most natural language have complicated grammars because it increases
> >>> the bandwidth and decreases the error rate in spoken communication.
> >>> Languages with very complex grammars (like Navajo) take much longer to
> >>> learn than those with simple grammars (like Esperanto).
> >> is that also true for native speakers?  i.e. does it take navajo
> >> children longer to learn to speak?
> >
> > I'm sure that Navajo kids start speaking at the same age as everyone
> > else.  It takes children several years to become fluent in their
> > native language (it takes them 3 full years to reach the proficiency
> > of a 3 year old, right?)
>
> I guess children still use plenty of wrong declination/conjugation.  A
> simpler language (I read book tomorrow) prevents making mistakes, while
> forcing a stricter word order (in Latin you can reorder pretty much
> everything you want, since every word has a case).

Interesting, someone was telling me about that just yesterday.

In natural languages there's a conflict between complicated word
modifiers and complicated grammar.  You can have lots of grammar and
simple unmodified words - like english - or simpler grammar and lots of
information put in the individual words like latin.  People have
difficulty learning either.

Lisp moves in a direction no natural language does or could: simple
grammar and fairly simple vocabulary, but loads of parens.

(Don't know where German fits in here).
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mlmc7F17n7ejU1@individual.net>
Rob Thorpe wrote:
> In natural languages there's a conflict between complicated word
> modifiers and complicated grammar.  You can have lots of grammar and
> simple unmodified words - like english - or simpler grammar and lots of
> information put in the individual words like latin.  People have
> difficulty learning either.

I don't think English has a lot of grammar, it's mostly word order. 
Latin has loads of declension attached to everything.  That means it's 
easy to match words together with their attributes, but it also means 
you have to listen closely, as word order isn't enough for parsing.

> Lisp moves in a direction no natural language does or could: simple
> grammar and fairly simple vocabulary, but loads of parens.

Not really.  Lisp has very simple expressions, but the first word is 
always the verb (or a macro).  All expressions are explicitly delimited, 
unlike in other languages, where you have to learn individual rules 
(implicit phrase termination) for each keyword.  Often, as in C, there 
are several kinds of terminators ({} for blocks, ; for statements, ) for 
param lists...).

> (Don't know where German fits in here).

As it is, German is pretty much translated Latin.  The grammar is quite 
similar in many aspects.  That makes it hard to learn I guess.

-- 
I believe in Karma.  That means I can do bad things to people
all day long and I assume they deserve it.
	Dogbert
From: André Thieme
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <de3bub$j0v$1@ulric.tng.de>
Jon Harrop schrieb:

> Additionally (pun intended), we were all taught operator precedences in
> conventional mathematics at a very young age. It seems at best odd and at
> worst stupid to disregard this.
> 

No pun intended, but if it really is so easy to work with operator 
precedence, how could someone trained in it write something like this 
comparison:

-a+b*c/d
(+ (- a) (* b (/ c d)))


André
-- 
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43021e94$0$97107$ed2619ec@ptn-nntp-reader03.plus.net>
Christophe Rhodes wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> Is Lisp code not made less maintainable because of all those brackets?
> 
> No, it is made more maintainable because of all those brackets,
> because it is straightforward to write tools which can manipulate the
> textual representation of your program, and because human programmers
> do not read the brackets.

I don't think that makes sense. Continuing that line of thinking, Whitespace
and Brainf*** are the most maintainable languages.

Consider the example:

(defun fib (x)
          (if (<= x 2)
              1 
              (+ (fib (- x 2))(fib (1- x)))))

In ML this is:

let fib x = if x<=2 then 1 else fib(x-2) + fib(x-1)

That may be easier to parse for the machine (I don't think it is though) but
maintainability is about how easily a human can parse it.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124218369.166626.304520@o13g2000cwo.googlegroups.com>
Jon Harrop wrote:
> Christophe Rhodes wrote:
> > Jon Harrop <······@jdh30.plus.com> writes:
> >> Is Lisp code not made less maintainable because of all those brackets?
> >
> > No, it is made more maintainable because of all those brackets,
> > because it is straightforward to write tools which can manipulate the
> > textual representation of your program, and because human programmers
> > do not read the brackets.
>
> I don't think that makes sense. Continuing that line of thinking, Whitespace
> and Brainf*** are the most maintainable languages.
>
> Consider the example:
>
> (defun fib (x)
>           (if (<= x 2)
>               1
>               (+ (fib (- x 2))(fib (1- x)))))
>
> In ML this is:
>
> let fib x = if x<=2 then 1 else fib(x-2) + fib(x-1)
>
> That may be easier to parse for the machine (I don't think it is though) but
> maintainability is about how easily a human can parse it.

The text of a program in lisp is -more or less- a direct representation
of a tree.

When the lisp reader sees the first paren above it begins a new list,
and puts each symbol it meets in the list.  When it hits a closing
paren it closes the list and goes back to the previous one it was
processing.  There are a few exceptions, but this is generally how it's
done.

In BNF this can be represented like this:

s_expression = atomic_symbol | "(" s_expression "."s_expression ")" |
            list
list = "(" s_expression < s_expression > ")"

There are many list functions that handle the language.  Of those there
are 3 main ones:
* "read" brings the textual form into memory (s-expressions -> trees)
* "print" does the reverse, (trees-> s-expressions)
* "eval" interprets code, executing the functions specified in the
trees

This has some interesting repercussions.

Firstly, at the language level, since everything - data and code - is
represented as above the language becomes more amenable to automatic
operations.
Some examples:-

1. If I load my buggy version of your raytracer into lisp and do:

(setq foo (ray-sphere ray sph))

foo now contains a huge tree.  Should I want this tree in the future
(for more debugging f.e.g) I can print it to a file with print, then
later read it back with read.

2. Emacs does not have to guess things about lisp.  With other
languages it occasionally has to guess, because it cannot understand
the syntax in reasonable time.  When it tells you all the parens match,
they match.

3. In a place (a fearful place :) ) that doesn't have emacs all is not
lost.  If lisp can read the code it can print it.  So, I can quote the
list with a quote mark*, and watch it:

'((defun fib (x) (if (<= x 2) 1 (+ (fib (- x 2))(fib (1- x)))))

It will then spit it back with whatever default formatting the lisp
printer uses for lists (there may also be an option for printing
functions in the printer).  The formatting may not be very nice, but if
the code is truly hairy it will be more understandable than it was.
You can use this sort of thing to do things like substituting sub-trees
in your code.
(* quote marks are one of the few true bits of syntax)

4. It makes macros easier to understand since they act on trees.


All of the above describes benefits in automatic formatting, not
readability.
Some people find it readable, some don't.  I personally don't find it
very easy or very difficult.  There are a number of tricks to reading
it and writing it.
What I find most effective is to do this:

When reading this for example:
(defun fib (x)
           (if (<= x 2)
               1
               (+ (fib (- x 2))(fib (1- x)))))

The meaning of the first line is fairly simple, as are the next two.
The problem is the last line.  "fib" is obviously called recursively
twice and the results are supposed to be added, but are they?

The easiest way to find out is to delete all the closing parens at the
end with Emacs.  You then replace them one by one, the cursor will
highlight the opening paren as this happens.  You can then watch them
and check they go in the right places.  Once you've finished closing
the parens on the line you're on, the rest should match on the lines
above to each left-most paren.

Occasionally I do spend a minute or so staring at the parens and
wondering what's wrong with them though.  It's not perfect for my own
reading.

There are a few other reading advantages that come with abandoning
syntax though.  For example you don't have to worry where in the manual
it is described, there's no page 53.  I've always had this problem with
Perl, I know something's wrong with it, but I can't remember the place
in the manual that explains it.  For a lisp the manual/standard need
only describe forms and what they do.

It also means errors can't leak syntactically: the error is in one
form.  They can certainly leak semantically though.
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43026d3d$0$17486$ed2e19e4@ptn-nntp-reader04.plus.net>
Rob Thorpe wrote:
> Firstly, at the language level, since everything - data and code - is
> represented as above the language becomes more amenable to automatic
> operations.

Yes. But is that related to the grammar?

> Some examples:-
> 
> 1. If I load my buggy version of your raytracer into lisp and do:
> 
> (setq foo (ray-sphere ray sph))
> 
> foo now contains a huge tree.  Should I want this tree in the future
> (for more debugging f.e.g) I can print it to a file with print, then
> later read it back with read.

That's cool. I guess the nearest in OCaml is to use the compiler's lexer and
parser to input the code and then marshall it to disc. Not very elegant...

> 2. Emacs does not have to guess things about lisp.  With other
> languages it occasionally has to guess, because it cannot understand
> the syntax in reasonable time.  When it tells you all the parens match,
> they match.

Ok. Can you give an example of a time when emacs has to guess?

> 3. In a place (a fearful place :) ) that doesn't have emacs all is not
> lost.  If lisp can read the code it can print it.  So, I can quote the
> list with a quote mark*, and watch it:
> 
> '((defun fib (x) (if (<= x 2) 1 (+ (fib (- x 2))(fib (1- x)))))
> 
> It will then spit it back with whatever default formatting the lisp
> printer uses for lists (there may also be an option for printing
> functions in the printer).  The formatting may not be very nice, but if
> the code is truely hairy it will be more understandable than it was.
> You can use this sort of this to do things like substituting sub-trees
> in your code.

Right, I saw that in Sussman's lecture (IIRC).

> (* quote marks are one of the few true bits of syntax)

Yes, "#" and ";" seem to be others. I don't know what "#" does.

> 4. It makes macros easier to understand since they act on trees.

So you predict that Lisp macros are easier to understand than OCaml's
macros?

> All of the above describes benefits in automatic formatting, not
> readability.
> Some people find it readable, some don't.  I personally don't find it
> very easy or very difficult.  There are a number of tricks to reading
> it and writing it.

I'm finding prefix notation readable but overly verbose. Counting
parentheses is a real pain though. Assuming the Lisp advocates here also
know conventional languages then there is plenty of evidence that one can
learn to read Lisp.

> The easiest way to find out is to delete all the closing parens at the
> end with Emacs.  You then replace them one by one, the cursor will
> highlight the opening paren as this happens.  You can then watch them
> and check they go in the right places.  Once you've finished closing
> the parens on the line you're on, the rest should match on the lines
> above to each left-most paren.

Yes. That is almost exactly what I am doing.

> It also means errors can't leak syntactically: the error is in one
> form.  They can certainly leak semantically though.

Yes. I think that the majority of the errors I get in other languages are
already semantic though.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: M Jared Finder
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <MpednaU_ZvycVZ_eRVn-jA@speakeasy.net>
Jon Harrop wrote:
> Rob Thorpe wrote:
> 
>>3. In a place (a fearful place :) ) that doesn't have emacs all is not
>>lost.  If lisp can read the code it can print it.  So, I can quote the
>>list with a quote mark*, and watch it:
>>
>>'((defun fib (x) (if (<= x 2) 1 (+ (fib (- x 2))(fib (1- x)))))
>>
>>It will then spit it back with whatever default formatting the lisp
>>printer uses for lists (there may also be an option for printing
>>functions in the printer).  The formatting may not be very nice, but if
>>the code is truely hairy it will be more understandable than it was.
>>You can use this sort of this to do things like substituting sub-trees
>>in your code.
> 
> Right, I saw that in Sussman's lecture (IIRC).
> 
>>(* quote marks are one of the few true bits of syntax)

Not quite.  Common Lisp has a feature called "reader macros" that allow 
you to execute arbitrary parsing code when a specific character is hit. 
  Assuming I had some other way to give the Lisp environment syntax 
trees, left parenthesis (, right parenthesis ), quote ', double-quote " 
and nearly all other syntax could be defined in Lisp.  See 
SET-MACRO-CHARACTER.

The only thing that can't be defined is the core reader algorithm as 
described in <http://www.lisp.org/HyperSpec/Body/sec_2-2.html>, which 
parses symbols and numbers.  Also possibly : and :: for naming symbols 
in other packages.

> Yes, "#" and ";" seem to be others. I don't know what "#" does.

# is a dispatching macro character that executes different parsing code 
depending on the next non-numeric character.  See 
SET-DISPATCH-MACRO-CHARACTER.  Some dispatching macro characters are:

#( for reading arrays, as in #(1 2 3)
#\ for reading characters, as in #\a
#< for signaling an error, as in #<FUNCTION CAR>

>>4. It makes macros easier to understand since they act on trees.
> 
> So you predict that Lisp macros are easier to understand than OCaml's
> macros?

Is this a fair comparison?  Can OCaml's macros do the same things Lisp's 
macros can do?  All the Lisp looping operations can be implemented as 
macros on top of if, tagbody, and goto.  CLOS can be implemented 
entirely in CLOS-free Lisp using macros and functions.  I don't know of 
any other language that has macros this powerful.

I can't imagine anyone having any more difficulty learning Lisp's DO 
than they would learning C's for.  Nor finding DEFUN or DEFSTRUCT or 
DEFCLASS any more complicated than their equivalents in other languages. 
  I find IF a bit irritating because you have to just remember that the 
first parameter is the test, the second is the "then" and the third is 
the "otherwise", but it's easy enough to make your own macro that fixes 
that:

(defmacro if+ (test &key then else)
   (list 'if test then else))

Which can be used like this:

(if+ (string= str1 str2)
      :then "equal"
      :else "different")

>>All of the above describes benefits in automatic formatting, not
>>readability.
>>Some people find it readable, some don't.  I personally don't find it
>>very easy or very difficult.  There are a number of tricks to reading
>>it and writing it.
> 
> I'm finding prefix notation readable but overly verbose. Counting
> parentheses is a real pain though. Assuming the Lisp advocates here also
> know conventional languages then there is plenty of evidence that one can
> learn to read Lisp.

Only for math or for all operations?  It would be quite neat to have a 
language that allowed me to say:

if( a collidesWith b ) then
    bounce a offOf b

but would that be significantly better than the equivalent Lisp?  I 
don't think so.

I find it funny that people constantly want infix, but only for 
mathematical operations.  Even in the most mathematical intensive code I 
wrote, a general CSG library, I found that very few operations I did 
were supported by the built-in mathematical syntax.

However, if you feel you *must* have infix for some arbitrary set of 
mathematical operations, you can always write your own parser in Lisp, 
and then use macro characters to read it in.  See 
<http://www.cliki.net/infix> for one such parser.

>>The easiest way to find out is to delete all the closing parens at the
>>end with Emacs.  You then replace them one by one, the cursor will
>>highlight the opening paren as this happens.  You can then watch them
>>and check they go in the right places.  Once you've finished closing
>>the parens on the line you're on, the rest should match on the lines
>>above to each left-most paren.
> 
> Yes. That is almost exactly what I am doing.

Yuck.  I suggest you find an editor that can auto indent Lisp code, like 
Emacs can.  It is so much easier to find mismatched braces when the code 
is indented.


It's hard for me to describe what I enjoy so much about writing Lisp 
code.  The best I can say is that Lisp is the first language I've used 
where every thing that should be simple to code up was actually simple 
to code up.  For sure, closures, macros, and a simple (but extendable) 
syntax all help.

   -- MJF
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124282903.710351.319290@g43g2000cwa.googlegroups.com>
Jon Harrop wrote:
> Rob Thorpe wrote:
> > Firstly, at the language level, since everything - data and code - is
> > represented as above the language becomes more amenable to automatic
> > operations.
>
> Yes. But is that related to the grammar?

It could be done with a different grammar, yes, though it may be
slightly more difficult.  But it's very obvious when done with the
grammar of lisp, because a human can predict the behaviour of the
automatic operations mentally.

> > Some examples:-
> >
> > 1. If I load my buggy version of your raytracer into lisp and do:
> >
> > (setq foo (ray-sphere ray sph))
> >
> > foo now contains a huge tree.  Should I want this tree in the future
> > (for more debugging f.e.g) I can print it to a file with print, then
> > later read it back with read.
>
> That's cool. I guess the nearest in OCaml is to use the compiler's lexer and
> parser to input the code and then marshall it to disc. Not very elegant...

It's quite useful, it can eliminate the need for data-file
parsers/lexers sometimes.

> > 2. Emacs does not have to guess things about lisp.  With other
> > languages it occasionally has to guess, because it cannot understand
> > the syntax in reasonable time.  When it tells you all the parens match,
> > they match.
>
> Ok. Can you give an example of a time when emacs has to guess?

Things like s%regex%regex% in Perl are one example, lots of syntax
highlighters get them wrong.  In general any situation where the syntax
of the language masks or buries parens or brackets.  Syntax recognition
programs make all kinds of hidden assumptions about code, such as
assuming that the indentation is locally correct when adding more.

> > 3. In a place (a fearful place :) ) that doesn't have emacs all is not
> > lost.  If lisp can read the code it can print it.  So, I can quote the
> > list with a quote mark*, and watch it:
> >
> > '((defun fib (x) (if (<= x 2) 1 (+ (fib (- x 2))(fib (1- x)))))
> >
> > It will then spit it back with whatever default formatting the lisp
> > printer uses for lists (there may also be an option for printing
> > functions in the printer).  The formatting may not be very nice, but if
> > the code is truely hairy it will be more understandable than it was.
> > You can use this sort of this to do things like substituting sub-trees
> > in your code.
>
> Right, I saw that in Sussman's lecture (IIRC).
>
> > (* quote marks are one of the few true bits of syntax)
>
> Yes, "#" and ";" seem to be others. I don't know what "#" does.

The most common syntax is:
"." - pair
"'" - quote
"#'" - function call
"#" - make simple-vector
"#S" - structure

There are several more not often used.  These are all reader-macros.
I describe "read" as though it is simple, because for most purposes of
understanding it is.  But underneath it's rather complicated, see
http://www.lisp.org/HyperSpec/Body/sec_2-2.html if you're interested.
Complex reader macros aren't that commonly used, thankfully.

> > 4. It makes macros easier to understand since they act on trees.
>
> So you predict that Lisp macros are easier to understand than OCaml's
> macros?

No, I wasn't predicting anything.
I was just saying that it's easy to understand what it does because the
text is a tree itself.  I'll have a look at ML macros.

> > All of the above describes benefits in automatic formatting, not
> > readability.
> > Some people find it readable, some don't.  I personally don't find it
> > very easy or very difficult.  There are a number of tricks to reading
> > it and writing it.
>
> I'm finding prefix notation readable but overly verbose. Counting
> parentheses is a real pain though. Assuming the Lisp advocates here also
> know conventional languages then there is plenty of evidence that one can
> learn to read Lisp.

Lots of indentation is needed, that's most of what makes it more
verbose.
Also, in Common lisp, the fact that the function names are so long.

> > The easiest way to find out is to delete all the closing parens at the
> > end with Emacs.  You then replace them one by one, the cursor will
> > highlight the opening paren as this happens.  You can then watch them
> > and check they go in the right places.  Once you've finished closing
> > the parens on the line you're on, the rest should match on the lines
> > above to each left-most paren.
>
> Yes. That is almost exactly what I am doing.

As far as I know that's the main trick to it, that and knowing how many
parens certain constructs start with, for example
(let ((<- two parens
(defun (<- one paren
(labels ((<- two parens

There are probably other tricks old lispers know.

> > It also means errors can't leak syntactically: the error is in one
> > form.  They can certainly leak semantically though.
>
> Yes. I think that the majority of the errors I get in other languages are
> already semantic though.

Me too, it doesn't gain you much.
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mg9v2F15mgq3U6@news.dfncis.de>
Jon Harrop <······@jdh30.plus.com> wrote:

>Ok. Can you give an example of a time when emacs has to guess?

Have you never tried to use Emacs with C, C++, Standard ML, awk,
perl, sh, ksh, Pascal, or actually any language other than (Emacs-)
Lisp? It can be a rather frustrating experience, especially since
the editor insists on knowing better than you, and sometimes even
"correcting" indentation even if you don't hit TAB.

mkb.
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124274207.084728.305530@z14g2000cwz.googlegroups.com>
Matthias Buelow wrote:
> Jon Harrop <······@jdh30.plus.com> wrote:
>
> >Ok. Can you give an example of a time when emacs has to guess?
>
> Have you never tried to use Emacs with C, C++, Standard ML, awk,
> perl, sh, ksh, Pascal, or actually any language other than (Emacs-)
> Lisp? It can be a rather frustrating experience, especially since
> the editor insists on knowing better than you, and sometimes even
> "correcting" indentation even if you don't hit TAB.

Emacs will indent if you press tab or at the end of a line.
If you don't like the way it does it you have to customize it.  It's
not different from many other syntax sensitive editors in this regard.
From: Jamie Border
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <dduuj9$1cp$1@nwrdmz03.dmz.ncs.ea.ibs-infra.bt.com>
"Matthias Buelow" <···@incubus.de> wrote in message 
····················@news.dfncis.de...
> Jon Harrop <······@jdh30.plus.com> wrote:
>
>>Ok. Can you give an example of a time when emacs has to guess?
>
> Have you never tried to use Emacs with C, C++, Standard ML, awk,
> perl, sh, ksh, Pascal, or actually any language other than (Emacs-)
> Lisp? It can be a rather frustrating experience, especially since
> the editor insists on knowing better than you, and sometimes even
> "correcting" indentation even if you don't hit TAB.

Emacs knows better.

You, me and everybody else are *wrong*.

Resistance is futile.

Jamie


>
> mkb. 
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mgfgnF16j6hbU1@news.dfncis.de>
Jamie Border <·····@jborder.com> wrote:

>Resistance is futile.

M-x we-are-the-borg

mkb.
From: Jamie Border
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <ddv1gm$83k$1@nwrdmz01.dmz.ncs.ea.ibs-infra.bt.com>
"Matthias Buelow" <···@incubus.de> wrote in message 
····················@news.dfncis.de...
> Jon Harrop <······@jdh30.plus.com> wrote:
>
>>Ok. Can you give an example of a time when emacs has to guess?
>
> Have you never tried to use Emacs with C, C++, Standard ML, awk,
> perl, sh, ksh, Pascal, or actually any language other than (Emacs-)
> Lisp? It can be a rather frustrating experience, especially since
> the editor insists on knowing better than you, and sometimes even
> "correcting" indentation even if you don't hit TAB.

You *can* customise a lot of Emacs' indentation behaviour, though:

http://www.cs.utah.edu/dept/old/texinfo/emacs18/emacs_25.html#SEC150

>
> mkb. 
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mghbqF16iqg7U1@news.dfncis.de>
Jamie Border <·····@jborder.com> wrote:
>
>"Matthias Buelow" <···@incubus.de> wrote in message 
>····················@news.dfncis.de...
>> Jon Harrop <······@jdh30.plus.com> wrote:
>>
>>>Ok. Can you give an example of a time when emacs has to guess?
>>
>> Have you never tried to use Emacs with C, C++, Standard ML, awk,
>> perl, sh, ksh, Pascal, or actually any language other than (Emacs-)
>> Lisp? It can be a rather frustrating experience, especially since
>> the editor insists on knowing better than you, and sometimes even
>> "correcting" indentation even if you don't hit TAB.
>
>You *can* customise a lot of Emacs' indentation behaviour, though:
>http://www.cs.utah.edu/dept/old/texinfo/emacs18/emacs_25.html#SEC150

I know.. that doesn't help much. The problem is that Emacs tries
to infer syntactical structure of the source text without actually
parsing it but instead relies on voodoo heuristics combined with
regular expressions on how to do it. Which, as often as not, fails
miserably.  "Modern IDEs" (Eclipse afa I have heard, I've never
used it) contain actual incremental parsers that do a much better
job at understanding the syntax, and hence, provide better results.
However, I personally am deeply suspicious of an editor that tries
to "understand" what I'm typing in. I've never seen it work properly,
and even if it works ok most of the time for some language, it
breaks as soon as the syntax is extended (for example, if I use
some preprocessor on it first), if several languages are mixed
within one file, or if the source is unfinished or deliberately
ungrammatical, or if I simply am using a different style than what
the editor prefers. That whole indentation stuff is probably an
outgrowth of the heyday of AI at the place where Emacs was written (MIT),
where people thought that with simple means they could create
"intelligent" software (DWIM-style).  Whereas I personally don't
like "intelligent" software that's making (sometimes invalid)
assumptions but instead prefer dumb programs that do exactly what
they're told. Language-sensitive indenting should work by hinting
the user (for example, providing commands to move the cursor to a
certain level that coincides with opening parentheses or so) but
never to guess by itself how the text ought to be formatted (and
even worse, more or less force the user into this scheme by
"correcting" the given indentation).  A program, especially one for
creating stuff, should be designed according to the dictum that the
user is always right and always knows better, and not the other way
round.

mkb.
From: Marco Antoniotti
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <dEIMe.38$DJ5.68970@typhoon.nyu.edu>
Matthias Buelow wrote:
> Jon Harrop <······@jdh30.plus.com> wrote:
> 
> 
>>Ok. Can you give an example of a time when emacs has to guess?
> 
> 
> Have you never tried to use Emacs with C, C++, Standard ML, awk,
> perl, sh, ksh, Pascal, or actually any language other than (Emacs-)
> Lisp? It can be a rather frustrating experience, especially since
> the editor insists on knowing better than you, and sometimes even
> "correcting" indentation even if you don't hit TAB.

How do you dare?!?  You, a mere mortal, pretend knowing how to indent 
code?  The Emacs God is throwing parentheses at you!

Cheers
--
Marco
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <u0ho8rtf.fsf@ccs.neu.edu>
Marco Antoniotti <·······@cs.nyu.edu> writes:

> Matthias Buelow wrote:
>> Jon Harrop <······@jdh30.plus.com> wrote:
>>
>>>Ok. Can you give an example of a time when emacs has to guess?
>> Have you never tried to use Emacs with C, C++, Standard ML, awk,
>> perl, sh, ksh, Pascal, or actually any language other than (Emacs-)
>> Lisp? It can be a rather frustrating experience, especially since
>> the editor insists on knowing better than you, and sometimes even
>> "correcting" indentation even if you don't hit TAB.
>
> How do you dare?!?  You, a mere mortal, pretend knowing how to indent
> code?  The Emacs God is throwing parentheses at you!

From my grimoire:

(put 'multiple-value-bind 'scheme-indent-function 2)

(defconst jrm-c-style
  (setq jrm-c-style
	'((c-basic-offset . 4)
	  (c-comment-only-line-offset . (0 . 0))
	  (c-offsets-alist . ((block-close . c-lineup-whitesmith-in-block)
			      (defun-block-intro . *)
			      (inclass . *)
                             (inline-open . 0)
			      (namespace-open . 0)
			      (namespace-close . 0)
			      ))
	  (c-hanging-braces-alist . ((substatement-open after) (block-open after)))
	  (c-echo-syntactic-information-p . t)))
  "Jrm's C style.")
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <8xz0ad3s.fsf@ccs.neu.edu>
Jon Harrop <······@jdh30.plus.com> writes:

> I'm finding prefix notation readable but overly verbose. 

Huh?  Infix notation generally allows only two arguments to an
operator.

   a + b + c + d + e

         vs.

   (+ a b c d e)
From: Tayssir John Gabbour
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124287783.107846.276490@g43g2000cwa.googlegroups.com>
Joe Marshall wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
> > I'm finding prefix notation readable but overly verbose.
>
> Huh?  Infix notation generally allows only two arguments to an
> operator.
>
>    a + b + c + d + e
>
>          vs.
>
>    (+ a b c d e)

Aren't most languages prefix anyway? Most of their function calls are
like:

f(x,y,z)

instead of the similar

(f x y z)


Incidentally, I think there are also syntaxes hidden in plain view, in
Lisp. Numbers have unlispy syntax, for example.

And we could probably simplify Lisp's syntax even further -- for
example, only having special operators in the first position, so
someone would have to use FUNCALL or MACROCALL if they wanted functions
and macros, respectively. (I wonder if that makes sense...)


Tayssir
--
http://www.zmag.org/znetaudio.html
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43034ca5$0$97124$ed2619ec@ptn-nntp-reader03.plus.net>
Joe Marshall wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> I'm finding prefix notation readable but overly verbose.
> 
> Huh?  Infix notation generally allows only two arguments to an
> operator.
> 
>    a + b + c + d + e
> 
>          vs.
> 
>    (+ a b c d e)

You mean "a+b+c+d+e", which is shorter. For slightly more complicated
expressions, the gap widens:

  -a+b*c/d
  (+ (- a) (* b (/ c d)))

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: ······@earthlink.net
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124290737.313073.21360@z14g2000cwz.googlegroups.com>
>   -a+b*c/d
>   (+ (- a) (* b (/ c d)))

Of course, the spaces could be dropped from the second if the two
examples used the same definition of "variable name".

However, the ambiguity of the first remains.  The amount of time and
money that such ambiguity costs dwarfs any savings.
From: Marco Antoniotti
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <wHIMe.39$DJ5.68970@typhoon.nyu.edu>
Jon Harrop wrote:
> Joe Marshall wrote:
> 
>>Jon Harrop <······@jdh30.plus.com> writes:
>>
>>>I'm finding prefix notation readable but overly verbose.
>>
>>Huh?  Infix notation generally allows only two arguments to an
>>operator.
>>
>>   a + b + c + d + e
>>
>>         vs.
>>
>>   (+ a b c d e)
> 
> 
> You mean "a+b+c+d+e", which is shorter. For slightly more complicated
> expressions, the gap widens:
> 
>   -a+b*c/d
>   (+ (- a) (* b (/ c d)))
> 

Not using spaces in infix code is evil.  Have you seen the GNU coding 
guidelines?

Cheers
--
Marco
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mhf5cF170bd3U1@news.dfncis.de>
Marco Antoniotti <·······@cs.nyu.edu> wrote:

>Not using spaces in infix code is evil.

Nonsense.

>Have you seen the GNU coding 
>guidelines?

Since when are they authoritative? Since when are they good style?
("Gnu" indented C looks like some betentacled thing from outer space).

mkb.
From: Marco Antoniotti
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <HlMMe.40$DJ5.68429@typhoon.nyu.edu>
Matthias Buelow wrote:
> Marco Antoniotti <·······@cs.nyu.edu> wrote:
> 
> 
>>Not using spaces in infix code is evil.
> 
> 
> Nonsense.
> 

You are dead wrong. Not using whitespace in infix notation is evil. 
Especially in C. Period.

>>Have you seen the GNU coding 
>>guidelines?
> 
> 
> Since when are they authoritative? Since when are they good style?
> ("Gnu" indented C looks like some betentacled thing from outer space).

The GNU coding guidelines are authoritative as they state the obvious. 
Spaces are necessary to make code readable and not drift too quickly 
toward the "Obfuscated C guidelines".

Yes, I agree that in some occasions the GNU coding guidelines go too 
far, but the reason they are good is that if somebody reads a piece of C 
(or whatever) code while being from the evil "spaces are unnecessary" 
camp, that person is not nauseated as much as a person from the good 
camp becomes when he or she reads unspaced C (or whatever) code.  :)

Cheers
--
Marco
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mhsp9F15qic7U1@news.dfncis.de>
Marco Antoniotti <·······@cs.nyu.edu> wrote:

>The GNU coding guidelines are authoritative as they state the obvious. 
>Spaces are necessary to make code readable and not drift too quickly 
>toward the "Obfuscated C guidelines".

I very much prefer:

    f(x*y+n*z, 3*a, 9+(5*x));

over

    f(x * y + n * z, 3 * a, 9 + (5 * x));

mkb.
From: Robert Uhl
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m38xz0ktuh.fsf@4dv.net>
Matthias Buelow <···@incubus.de> writes:
>
> I very much prefer:
>
>     f(x*y+n*z, 3*a, 9+(5*x));
>
> over
>
>     f(x * y + n * z, 3 * a, 9 + (5 * x));

_I_ very much prefer:

  function(taxes*rate + value*foo,
           3*eggshells,
           9 + 5*taxes);

But I'm weird:-)

-- 
Robert Uhl <http://public.xdi.org/=ruhl>
That's how you know you're hooked on something; when it makes you forget
to drink beer.                     --Paul Mather, commenting on The Sims
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mi17jF16ucs9U1@news.dfncis.de>
Robert Uhl <·········@nospamgmail.com> wrote:

>  function(taxes*rate + value*foo,
>           3*eggshells,
>           9 + 5*taxes);

You're paying your taxes with eggshells?

mkb.
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mivgmF179pdhU1@news.dfncis.de>
Stefan Ram <···@zedat.fu-berlin.de> wrote:

[...]

I'd say these rules make your code exceptionally hard to read for
most people.

mkb.
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <y86z6ye9.fsf@ccs.neu.edu>
···@zedat.fu-berlin.de (Stefan Ram) writes:

>   I indent Lisp by the same rules, for example:
>
> ( defun factorial( x )
>   ( if( eql x 0 )
>     1 
>     ( * x( factorial( - x 1 )))))

Yuck!  I feel like someone injected expanding foam in my subexpressions!

~jrm
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124353529.784552.81620@o13g2000cwo.googlegroups.com>
Matthias Buelow wrote:
> Marco Antoniotti <·······@cs.nyu.edu> wrote:
>
> >The GNU coding guidelines are authoritative as they state the obvious.
> >Spaces are necessary to make code readable and not drift too quickly
> >toward the "Obfuscated C guidelines".
>
> I very much prefer:
>
>     f(x*y+n*z, 3*a, 9+(5*x));
>
> over
>
>     f(x * y + n * z, 3 * a, 9 + (5 * x));

I very much hope I never have to modify any of your programs :)

If anyone wrote arithmetic without whitespace separating it in any
programs I maintain they would be taken outside and shot.
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mivlrF179pdhU2@news.dfncis.de>
Rob Thorpe <·············@antenova.com> wrote:

>If anyone wrote arithmetic without whitespace separating it in any
>programs I maintain they would be taken outside and shot.

Ah yes.. why bother with common sense, when sheer dumb brutality
also works...

mkb.
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124355556.811654.300510@g47g2000cwa.googlegroups.com>
Matthias Buelow wrote:
> Rob Thorpe <·············@antenova.com> wrote:
>
> >If anyone wrote arithmetic without whitespace separating it in any
> >programs I maintain they would be taken outside and shot.
>
> Ah yes.. why bother with common sense, when sheer dumb brutality
> also works...

Absolutely :)

>From a particularly ugly piece of C I maintain:
Imagine if

	radius = (int)(size *
                 ((float)(min - *(result_ptr + (MAX_S21_FREQS +
1)						  + freqs)) / (min - max)));
Where written:-
	radius=(int)(size*((float)(min-*(result_ptr+(MAX_S21_FREQS+1)
                   +freqs))/(min-max)));

Can you read the second version, I certainly can't?  Ask 10 programmers
which they would find more readable, I expect most of them will pick
the first.

It doesn't really matter with small arithmetic expressions though.
Thankfully, I've never met a programmer who doesn't put whitespace in
expressions, so I haven't gathered any manslaughter convictions yet.
From: M Jared Finder
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <Mu2dnYYlNKXDNJneRVn-vA@speakeasy.net>
Rob Thorpe wrote:
> Matthias Buelow wrote:
>>Rob Thorpe <·············@antenova.com> wrote:
>>
>>>If anyone wrote arithmetic without whitespace separating it in any
>>>programs I maintain they would be taken outside and shot.
>>
>>Ah yes.. why bother with common sense, when sheer dumb brutality
>>also works...
> 
> Absolutely :)
> 
>>From a particularly ugly piece of C I maintain:
> Imagine if
> 
> 	radius = (int)(size *
>                  ((float)(min - *(result_ptr + (MAX_S21_FREQS +
> 1)						  + freqs)) / (min - max)));
> Where written:-
> 	radius=(int)(size*((float)(min-*(result_ptr+(MAX_S21_FREQS+1)
>                    +freqs))/(min-max)));
> 
> Can you read the second version, I certainly can't?  Ask 10 programmers
> which they would find more readable, I expect most of them will pick
> the first.

They're both unreadable.  Where's my abstraction?  Where's my higher 
level concepts?  I'd prefer to read something that gave a name to the 
operations being performed:

radius = size * range_normalize( min, max,
                                  result_ptr[MAX_S21_FREQS + 1 + freqs]);

// the opposite of interpolate
// x == interpolate( min, max, range_normalize( min, max, x ))
// (within floating point error, of course)
float range_normalize( int value, int min, int max ) {
   return (float)(value - min) / (float)(max - min);
}

   -- MJF
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124705095.717707.212070@f14g2000cwb.googlegroups.com>
M Jared Finder wrote:
> Rob Thorpe wrote:
> > Matthias Buelow wrote:
> >>Rob Thorpe <·············@antenova.com> wrote:
> >>
> >>>If anyone wrote arithmetic without whitespace separating it in any
> >>>programs I maintain they would be taken outside and shot.
> >>
> >>Ah yes.. why bother with common sense, when sheer dumb brutality
> >>also works...
> >
> > Absolutely :)
> >
> >>From a particularly ugly piece of C I maintain:
> > Imagine if
> >
> > 	radius = (int)(size *
> >                  ((float)(min - *(result_ptr + (MAX_S21_FREQS +
> > 1)						  + freqs)) / (min - max)));
> > Where written:-
> > 	radius=(int)(size*((float)(min-*(result_ptr+(MAX_S21_FREQS+1)
> >                    +freqs))/(min-max)));
> >
> > Can you read the second version, I certainly can't?  Ask 10 programmers
> > which they would find more readable, I expect most of them will pick
> > the first.
>
> They're both unreadable.  Where's my abstraction?  Where's my higher
> level concepts?  I'd prefer to read something that gave a name to the
> operations being performed:
>
> radius = size * range_normalize( min, max,
>                                   result_ptr[MAX_S21_FREQS + 1 + freqs]);
>
> // the opposite of interpolate
> // x == interpolate( min, max, range_normalize( min, max, x ))
> // (within floating point error, of course)
> float range_normalize( int value, int min, int max ) {
>    return (float)(value - min) / (float)(max - min);
> }

That's a good idea, I hadn't thought of adding a function like that.
I'll do it.
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mj1uhF173146U1@news.dfncis.de>
Rob Thorpe <·············@antenova.com> wrote:

>>From a particularly ugly piece of C I maintain:
>Imagine if
>
>        radius = (int)(size *
>                 ((float)(min - *(result_ptr + (MAX_S21_FREQS +
>1)                                                + freqs)) / (min - max)));
>Where written:-
>        radius=(int)(size*((float)(min-*(result_ptr+(MAX_S21_FREQS+1)
>                   +freqs))/(min-max)));
>
>Can you read the second version, I certainly can't?  Ask 10 programmers
>which they would find more readable, I expect most of them will pick
>the first.

Actually I find both completely unproblematic to read.
But I'd prefer something like:

  radius = (int)(size * ((float)(min - *(result_ptr+(MAX_S21_FREQS+1)+freqs))
			/ (min-max)))

That is a term where neither the whitespace fetishist has been
carried away by his desire, nor where CompactoMan has left his
mark.

The one thing I mildly object to is missing whitespace after comma,
that is foo(frobb,knurl,gork) instead of foo(frobb, knurl, gork).
But that isn't a tragedy either.

A true annoyance is putting whitespace after opening parentheses or
square parens, like a[ b ], if ( foo ), especially when it's nested,
like if ( a[ b [ c ] ] || foo( bar )).
That I find significantly harder to read than normal style since it
imho breaks the flow of reading.

>It doesn't really matter with small arithmetic expressions though.
>Thankfully, I've never met a programmer who doesn't put whitespace in
>expressions, so I haven't gathered any manslaughter convictions yet.

Eh.. weird.

mkb.
From: Michael Sullivan
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1h1hhf5.1dyrk4l1ayczinN%use-reply-to@spambegone.null>
Matthias Buelow <···@incubus.de> wrote:

>   radius = (int)(size * ((float)(min - *(result_ptr+(MAX_S21_FREQS+1)+freqs))
>           / (min-max)))

> That is a term where neither the whitespace fetishist has been
> carried away by his desire, nor where CompactoMan has left his
> mark.

I sort of agree.  My practice in C et. al. is to space out the lower
precedence operators in an expression with operators of different
precedences.  For me, this makes precedence jump out when reading code.  I
still use parens where a reader is unlikely to remember the precedence,
but not for things like AND/OR or +/*.  So I'd write:

a = (b^baz + c*foo)/5

To me, this looks closer to the way I'd write a math expression with a
pen or set it in Tex.  

I'm not fond of reading code where every operator is spaced, and I like
reading code where no operator is spaced even less. Given a choice
between the two extremes, I'd take spacing, but I greatly prefer an
authored mixture that gives intentional groupings based on some kind of
logic (if not precedence, then some kind of grouping logic that makes
domain sense).


Michael
From: Pascal Bourguignon
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <87wtmj874n.fsf@thalassa.informatimago.com>
············@spambegone.null (Michael Sullivan) writes:
> So I'd write:
>
> a = (b^baz + c*foo)/5

Bang! 

Anybody will think that you want: a=((b^baz)+(c*foo))/5
but what you actually get is:     a=(b^(baz+(c*foo)))/5

 +---------------------------------------------------------------------+
 |  The only safe way to write expressions in C is to put parentheses  |
 |  around all operators, and to forget spaces because they'll make    |
 |  the reader make false assumptions.                                 |
 +---------------------------------------------------------------------+


-- 
__Pascal Bourguignon__                     http://www.informatimago.com/

There is no worse tyranny than to force a man to pay for what he does not
want merely because you think it would be good for him. -- Robert Heinlein
From: Michael Sullivan
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1h1hw9t.bfvut41gmrhw3N%use-reply-to@spambegone.null>
Pascal Bourguignon <····@mouse-potato.com> wrote:

> ············@spambegone.null (Michael Sullivan) writes:
> > So I'd write:

> > a = (b^baz + c*foo)/5
 
> Bang! 

> Anybody will think that you want: a=((b^baz)+(c*foo))/5
> but what you actually get is:     a=(b^(baz+(c*foo)))/5

Doh!

That's what I get for not doing any math in C for years.  I knew there
was at least one operator precedence decision that made fuck-all sense
to me.  That would be it.

I still prefer spacing to look like math expressions.  

But of course I prefer lisp more in spite of the extra characters and
still finding prefix slightly unnatural for math.


Michael
From: Brian Downing
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <C75Ne.39370$084.8561@attbi_s22>
In article <···································@spambegone.null>,
Michael Sullivan <·······@bcect.com> wrote:
> That's what I get for not doing any math in C for years.  I knew there
> was at least one operator precedence decision that made fuck-all sense
> to me.  That would be it.

Well, it kind of makes fuck-all sense because ^ is LOGXOR, not EXPT.  :)

-bcd
-- 
*** Brian Downing <bdowning at lavos dot net> 
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <u0hn6yco.fsf@ccs.neu.edu>
"Rob Thorpe" <·············@antenova.com> writes:

> Matthias Buelow wrote:
>> Rob Thorpe <·············@antenova.com> wrote:
>>
>> >If anyone wrote arithmetic without whitespace separating it in any
>> >programs I maintain they would be taken outside and shot.
>>
>> Ah yes.. why bother with common sense, when sheer dumb brutality
>> also works...
>
> Absolutely :)
>
>>From a particularly ugly piece of C I maintain:
> Imagine if
>
> 	radius = (int)(size *
>                  ((float)(min - *(result_ptr + (MAX_S21_FREQS +
> 1)						  + freqs)) / (min - max)));
> Where written:-
> 	radius=(int)(size*((float)(min-*(result_ptr+(MAX_S21_FREQS+1)
>                    +freqs))/(min-max)));
>
> Can you read the second version, I certainly can't?  Ask 10 programmers
> which they would find more readable, I expect most of them will pick
> the first.

I can't read either.
From: Pascal Bourguignon
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <87wtmja4s9.fsf@thalassa.informatimago.com>
"Rob Thorpe" <·············@antenova.com> writes:
>>From a particularly ugly piece of C I maintain:
> Imagine if
>
> 	radius = (int)(size *
>                  ((float)(min - *(result_ptr + (MAX_S21_FREQS +
> 1)						  + freqs)) / (min - max)));
> Where written:-
> 	radius=(int)(size*((float)(min-*(result_ptr+(MAX_S21_FREQS+1)
>                    +freqs))/(min-max)));
>
> Can you read the second version, I certainly can't?  Ask 10 programmers
> which they would find more readable, I expect most of them will pick
> the first.

I find the second more readable, but not perfect. I'd write it:

 	radius=(int)(size*((float)(min-(*(result_ptr+MAX_S21_FREQS+1+freqs))))
                     /(min-max));

Or even:

 	radius=(int)(size*((float)(min-result_ptr[MAX_S21_FREQS+1+freqs]))
                     /(min-max));

> It doesn't really matter with small arithmetic expressions though.
> Thankfully, I've never met a programmer who doesn't put whitespace in
> expressions, so I haven't gathered any manslaughter convictions yet.

Now you have.


-- 
__Pascal Bourguignon__                     http://www.informatimago.com/
Small brave carnivores
Kill pine cones and mosquitoes
Fear vacuum cleaner
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124365997.001234.263970@f14g2000cwb.googlegroups.com>
Pascal Bourguignon wrote:
> "Rob Thorpe" <·············@antenova.com> writes:
> >>From a particularly ugly piece of C I maintain:
> > Imagine if
> >
> > 	radius = (int)(size *
> >                  ((float)(min - *(result_ptr + (MAX_S21_FREQS +
> > 1)						  + freqs)) / (min - max)));
> > Where written:-
> > 	radius=(int)(size*((float)(min-*(result_ptr+(MAX_S21_FREQS+1)
> >                    +freqs))/(min-max)));
> >
> > Can you read the second version, I certainly can't?  Ask 10 programmers
> > which they would find more readable, I expect most of them will pick
> > the first.
>
> I find the second more readable, but not perfect. I'd write it:
>
>  	radius=(int)(size*((float)(min-(*(result_ptr+MAX_S21_FREQS+1+freqs))))
>                      /(min-max));
>
> Or even:
>
>  	radius=(int)(size*((float)(min-result_ptr[MAX_S21_FREQS+1+freqs]))
>                      /(min-max));

Making it an array lookup would certainly make it clearer.
The phrase (MAX_S21_FREQS + 1) refers to something specific in the
code; it's in parens to emphasize that the 1 is being added to
MAX_S21_FREQS.

I could certainly read your versions, though I wouldn't prefer them.
In particular, reading lisp causes me to see "min-result_ptr" as a
single identifier at first glance.

> > It doesn't really matter with small arithmetic expressions though.
> > Thankfully, I've never met a programmer who doesn't put whitespace in
> > expressions, so I haven't gathered any manslaughter convictions yet.
> 
> Now you have.

Two in fact...  I'm getting worried :)
From: Pascal Bourguignon
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <87d5oba1sq.fsf@thalassa.informatimago.com>
"Rob Thorpe" <·············@antenova.com> writes:
>> Or even:
>>
>>  	radius=(int)(size*((float)(min-result_ptr[MAX_S21_FREQS+1+freqs]))
>>                      /(min-max));
> [...]
> I could certainly read your versions, though I wouldn't prefer them.
> In particular, reading lisp causes me to see "min-result_ptr" as a
> single identifier at first glance.

:-) I've not written much C since I do CL...


-- 
"Debugging?  Klingons do not debug! Our software does not coddle the
weak."
From: Patrick May
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m21x4r8nlo.fsf@patrick.intamission.com>
Pascal Bourguignon <····@mouse-potato.com> writes:
> "Rob Thorpe" <·············@antenova.com> writes:
> > Imagine if
> >
> > 	radius = (int)(size *
> >                  ((float)(min - *(result_ptr + (MAX_S21_FREQS + 1)
> >						  + freqs)) / (min - max)));
> > Were written:
> > 	radius=(int)(size*((float)(min-*(result_ptr+(MAX_S21_FREQS+1)
> >                    +freqs))/(min-max)));
> >
> > Can you read the second version, I certainly can't?  Ask 10
> > programmers which they would find more readable, I expect most of
> > them will pick the first.
> 
> I find the second more readable, but not perfect. I'd write it:
> 
>  	radius=(int)(size*((float)(min-(*(result_ptr+MAX_S21_FREQS+1+freqs))))
>                      /(min-max));

     I must be a complete whitespace freak compared to you.  The casts
make it tricky, but I'd format it something like this (must be viewed
in a fixed font):

    radius = (int)(size
                   * ((float)(min - *(result_ptr
                                      + (MAX_S21_FREQS + 1)
                                      + freqs))
                      / (min - max)));

All of the operators line up with the first term of their respective
subexpressions and the internal groupings are clear.  Emacs will
ensure that any changes modify the indentation to reflect what's
really going on.

Just another data point,

Patrick

------------------------------------------------------------------------
S P Engineering, Inc.    | The experts in large scale distributed OO
                         | systems design and implementation.
          ···@spe.com    | (C++, Java, Common Lisp, Jini, CORBA, UML)
From: Dan Schmidt
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <ur7crl69x.fsf@turangalila.harmonixmusic.com>
Patrick May <···@spe.com> writes:

|      I must be a complete whitespace freak compared to you.  The casts
| make it tricky, but I'd format it something like this (must be viewed
| in a fixed font):
|
|     radius = (int)(size
|                    * ((float)(min - *(result_ptr
|                                       + (MAX_S21_FREQS + 1)
|                                       + freqs))
|                       / (min - max)));

None of you would introduce temporary variables?  I'd be more inclined
to write something like

  float val     = *(result_ptr + (MAX_S21_FREQS + 1) + freqs);
  float ratio   = (min - val) / (min - max);
  int   radius  = size * ratio;

I don't know what any of these variables mean, though, so I can't
really give them very good names.
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124379836.951006.180060@f14g2000cwb.googlegroups.com>
Dan Schmidt wrote:
> Patrick May <···@spe.com> writes:
>
> |      I must be a complete whitespace freak compared to you.  The casts
> | make it tricky, but I'd format it something like this (must be viewed
> | in a fixed font):
> |
> |     radius = (int)(size
> |                    * ((float)(min - *(result_ptr
> |                                       + (MAX_S21_FREQS + 1)
> |                                       + freqs))
> |                       / (min - max)));
>
> None of you would introduce temporary variables?  I'd be more inclined
> to write something like
>
>   float val     = *(result_ptr + (MAX_S21_FREQS + 1) + freqs);
>   float ratio   = (min - val) / (min - max);
>   int   radius  = size * ratio;
>
> I don't know what any of these variables mean, though, so I can't
> really give them very good names.

Actually, much of the code reads:-
value = *(result_ptr + (MAX_SOMETHING + 1) + freqs);
...
cur_foo = (int)size * (float)(min - value) / (min - max);
Because "value" is used several times

This line is rather a bad one.
From: Marco Antoniotti
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <tj2Ne.42$DJ5.69116@typhoon.nyu.edu>
Patrick May wrote:

>      I must be a complete whitespace freak compared to you.  The casts
> make it tricky, but I'd format it something like this (must be viewed
> in a fixed font):
> 
>     radius = (int)(size
>                    * ((float)(min - *(result_ptr
>                                       + (MAX_S21_FREQS + 1)
>                                       + freqs))
>                       / (min - max)));
> 
> All of the operators line up with the first term of their respective
> subexpressions and the internal groupings are clear.  Emacs will
> ensure that any changes modify the indentation to reflect what's
> really going on.

The fact that Emacs will correctly indent the above should be the end of 
this story. :)  The indentation is correct.

Cheers
--
Marco
From: Marco Antoniotti
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <De2Ne.41$DJ5.69116@typhoon.nyu.edu>
Matthias Buelow wrote:
> Marco Antoniotti <·······@cs.nyu.edu> wrote:
> 
> 
>>The GNU coding guidelines are authoritative as they state the obvious. 
>>Spaces are necessary to make code readable and not drift too quickly 
>>toward the "Obfuscated C guidelines".
> 
> 
> I very much prefer:
> 
>     f(x*y+n*z, 3*a, 9+(5*x));
> 
> over
> 
>     f(x * y + n * z, 3 * a, 9 + (5 * x));
> 
> mkb.

Again the difference is that when you are reading the second one (which 
is a simplified case) you do not get as much of an headache as when I 
read the first.  At a minimum you are insensitive toward other people's 
health :)

Cheers
--
Marco
From: John
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <slrndg791u.11a9.NkHA32Am@mailinator.com>
On 2005-08-17, Matthias Buelow <···@incubus.de> wrote:
>  ("Gnu" indented C looks like some betentacled thing from outer space).

I'm glad I'm not alone in that belief. GNU indented C code is crap.
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m364u4bmre.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Joe Marshall wrote:
> > Jon Harrop <······@jdh30.plus.com> writes:
> >> I'm finding prefix notation readable but overly verbose.
> > 
> > Huh?  Infix notation generally allows only two arguments to an
> > operator.
> > 
> >    a + b + c + d + e
> > 
> >          vs.
> > 
> >    (+ a b c d e)
> 
> You mean "a+b+c+d+e", which is shorter.

But actually more _verbose_.


> For slightly more complicated
> expressions, the gap widens:
> 
>   -a+b*c/d
>   (+ (- a) (* b (/ c d)))

In _some_ contexts this can be true, which is why there is infix
available (as noted earlier).


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <pssc8qn6.fsf@ccs.neu.edu>
Jon Harrop <······@jdh30.plus.com> writes:

> Joe Marshall wrote:
>> Jon Harrop <······@jdh30.plus.com> writes:
>>> I'm finding prefix notation readable but overly verbose.
>> 
>> Huh?  Infix notation generally allows only two arguments to an
>> operator.
>> 
>>    a + b + c + d + e
>> 
>>          vs.
>> 
>>    (+ a b c d e)
>
> You mean "a+b+c+d+e", which is shorter. 

No, I meant   Math.Add(a, b, c, d, e)
which is longer.
From: Jens Axel Søgaard
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43035093$0$43188$edfadb0f@dread12.news.tele.dk>
Jon Harrop wrote:
> You mean "a+b+c+d+e", which is shorter. For slightly more complicated
> expressions, the gap widens:
> 
>   -a+b*c/d
>   (+ (- a) (* b (/ c d)))

One can always cheat and use infix.cl which allows one to write e.g.

    #I( x^^2 + y^^2 )  in stead of  (+ (expt x 2) (expt y 2))

and

     #I(if x<y<=z then f(x)=x^^2+y^^2 else f(x)=x^^2-y^^2)

in stead of

      (IF (AND (< X Y) (<= Y Z))
          (SETF (F X) (+ (EXPT X 2) (EXPT Y 2)))
          (SETF (F X) (- (EXPT X 2) (EXPT Y 2))))

See

<http://www.cs.cmu.edu/afs/cs/project/ai-repository/ai/lang/lisp/code/syntax/infix/infix.cl>

for a complete list of operators and their precedence.

-- 
Jens Axel Søgaard
From: Hartmann Schaffer
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <_2TMe.1806$Dd.7607@newscontent-01.sprint.ca>
Jon Harrop wrote:
> Joe Marshall wrote:
> 
>>Jon Harrop <······@jdh30.plus.com> writes:
>>
>>>I'm finding prefix notation readable but overly verbose.
>>
>>Huh?  Infix notation generally allows only two arguments to an
>>operator.
>>
>>   a + b + c + d + e
>>
>>         vs.
>>
>>   (+ a b c d e)
> 
> 
> You mean "a+b+c+d+e", which is shorter. For slightly more complicated
> expressions, the gap widens:
> 
>   -a+b*c/d
>   (+ (- a) (* b (/ c d)))

if the arity of the operators is known and fixed, you can omit the 
parentheses:

  + ~a * b / c d
(~ standing for unary minus), or, if you have fixed identifier lengths 
(1 char)

+~a*b/cd

hs
From: André Thieme
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <de3b1a$ie1$1@ulric.tng.de>
Jon Harrop schrieb:
> Joe Marshall wrote:
> 
>>Jon Harrop <······@jdh30.plus.com> writes:
>>
>>>I'm finding prefix notation readable but overly verbose.
>>
>>Huh?  Infix notation generally allows only two arguments to an
>>operator.
>>
>>   a + b + c + d + e
>>
>>         vs.
>>
>>   (+ a b c d e)
> 
> 
> You mean "a+b+c+d+e", which is shorter.

It is not only shorter but also not so easy to read anymore.
When I code in C/PHP/Java I always write
a + b + c + d + e

Besides that, we don't earn anything by repeating + three times.
What version is more verbose?


> For slightly more complicated
> expressions, the gap widens:
> 
>   -a+b*c/d


Huh? what does that mean?
I would parse it as:
(-a) + ((b*c) / d)

and this is how I would write it in other languages than Lisp.

>   (+ (- a) (* b (/ c d)))

Hmm...
(-a) + (b * (c/d))  =  -a+b*c/d ?

No really, I don't like operator precedence. You could not express 
exactly what you wanted to say with -a+b*c/d.


André
-- 
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4305bb97$0$22906$ed2619ec@ptn-nntp-reader01.plus.net>
André Thieme wrote:
> No really, I don't like operator precedence. You could not express
> exactly what you wanted to say with -a+b*c/d.

No. Objectively, it works in most programming languages, so I certainly can
express exactly what I want that way.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: ······@earthlink.net
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124465235.776144.326770@o13g2000cwo.googlegroups.com>
> No. Objectively, it works in most programming languages, so I certainly can
> express exactly what I want that way.

It may "work in most programming languages", but it means different
things in many of those languages, which pretty much kills any "it's
better/natural" argument.

And, Harrop still hasn't figured out that his character count argument
has a strong dependence on the characters allowed in variable names.
Common Lisp happens to allow more characters in symbol names than most
languages.  If C/C++/Caml did the same, he couldn't omit spaces around
infix operator tokens.

-andy
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4305fdde$0$17470$ed2e19e4@ptn-nntp-reader04.plus.net>
······@earthlink.net wrote:
>> No. Objectively, it works in most programming languages, so I certainly
>> can express exactly what I want that way.
> 
> It may "work in most programming languages", but it means different
> things in many of those languages,

Can you be more specific? It means the same thing in Java, C, C++, C#, SML,
OCaml, BASIC, Fortran, Pascal and most other languages that I can think of.
Do you mean the types might be different?

> which pretty much kills any "it's better/natural" argument.

I'm not saying it is natural. I'm saying it is widely understood and more
concise.

> And, Harrop still hasn't figured out that his character count argument
> has a strong dependence on the characters allowed in variable names.
> Common Lisp happens to allow more characters in symbol names than most
> languages.  If C/C++/Caml did the same, he couldn't omit spaces around
> infix operator tokens.

You're saying if C/C++/Caml used Lisp syntax then the expression I gave
wouldn't be valid in C/C++/Caml? Yes, of course. But they don't...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: M Jared Finder
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <h5-dnTz169u7m5veRVn-1w@speakeasy.net>
Jon Harrop wrote:
> ······@earthlink.net wrote:
> 
>>>No. Objectively, it works in most programming languages, so I certainly
>>>can express exactly what I want that way.
>>
>>It may "work in most programming languages", but it means different
>>things in many of those languages,
> 
> 
> Can you be more specific? It means the same thing in Java, C, C++, C#, SML,
> OCaml, BASIC, Fortran, Pascal and most other languages that I can think of.
> Do you mean the types might be different?
> 
> 
>>which pretty much kills any "it's better/natural" argument.
> 
> 
> I'm not saying it is natural. I'm saying it is widely understood and more
> concise.
> 
> 
>>And, Harrop still hasn't figured out that his character count argument
>>has a strong dependence on the characters allowed in variable names.
>>Common Lisp happens to allow more characters in symbol names than most
>>languages.  If C/C++/Caml did the same, he couldn't omit spaces around
>>infix operator tokens.
> 
> You're saying if C/C++/Caml used Lisp syntax then the expression I gave
> wouldn't be valid in C/C++/Caml? Yes, of course. But they don't...

No, he's just saying that if you could create variable names with 
operator characters in their name (or even if some operators were 
multiple characters!), you'd need to use spaces to seperate each 
operator.  Consider the following C code:

difference = first-sum - second-sum;

Did I intend in the first example to subtract two variables (first-sum 
and second-sum) or to subtract (+ second (* 2 sum)) from first?

----x;  // compare against "- - - -x"

Does this return (- x 2) or x?

It seems to me that C/C++/Caml's rules are more human -- they
*often, but not always* do what I want, where Lisp's rules are more 
robotic.  As long as you are not writing metaprograms, it's a matter of 
taste which you prefer.  But as soon as you start writing metaprograms, 
Lisp's way is clearly superior.  Just look at the amount of parenthesis 
used when writing C preprocessor metaprograms.

   -- MJF
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430612ab$0$1300$ed2619ec@ptn-nntp-reader02.plus.net>
M Jared Finder wrote:
> It seems to me that C/C++/Caml's rules are more human -- they
> *often, but not always* do what I want, where Lisp's rules are more
> robotic.  As long as you are not writing metaprograms, it's a matter of
> taste which you prefer.

I think this argument should be made invalid by the use of type setting
IDEs, like Mathematica's. They remove most syntactic ambiguity. It's a
shame that they're not more common, but I'm working on it...

> But as soon as you start writing metaprograms, 
> Lisp's way is clearly superior.  Just look at the amount of parenthesis
> used when writing C preprocessor metaprograms.

Other people are telling me that I am wrong here but I believe Mathematica
has equivalent capability without having to resort to prefix notation. For
example:

In[1]:= f[-a + b*c/d] := 1

In[2]:= f[-a + b*c/d]
Out[2]= 1

When infix notation fails, you can resort to the function call equivalents.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124476929.009838.46750@o13g2000cwo.googlegroups.com>
Jon Harrop wrote:
> ······@earthlink.net wrote:
> >> No. Objectively, it works in most programming languages, so I certainly
> >> can express exactly what I want that way.
> >
> > It may "work in most programming languages", but it means different
> > things in many of those languages,
>
> Can you be more specific? It means the same thing in Java, C, C++, C#, SML,
> OCaml, BASIC, Fortran, Pascal and most other languages that I can think of.
> Do you mean the types might be different?

That's one part of it.

In Smalltalk -a+b*c/d would be read left to right, unlike most other
languages.

If the types were different then in the C derived languages some
coercions would take place to expand the types of some variables to
match the others in the expression.  For example if one of c and d are
floating types then the resultant is floating type too.  If both are
integral then C like languages will perform an integral division and
throw away the remainder without rounding.  Also, if the result of this
expression were assigned to another variable like this:
x=-a+b*c/d;
then in C there would be a coercion performed by the assignment, unlike
some of the other languages.

In Pascal (or I think SML) you can't divide integrals with /.

Some Fortran standards don't define associativity unlike the C like
languages.  The compiler picks the order of evaluation.  The newer
standards define it as left to right, but not Fortran 77 (AFAICR).

In Pascal unary and binary "-" have the same precedence, in C like
languages unary is higher.

In different versions of the C standard / may behave in different ways
with respect to rounding.  (It's not portable in C89 but it is in C99)

Division by zero in "b*c/d" will store a NaN in some languages, in
others it will give an error.

And I haven't mentioned what happens if any of a, b, c or d are
functions (or macros).

> > which pretty much kills any "it's better/natural" argument.
>
> I'm not saying it is natural. I'm saying it is widely understood and more
> concise.

I would agree with that referring to the mathematical expression.
From: Pascal Bourguignon
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <87r7csbpwv.fsf@thalassa.informatimago.com>
Joe Marshall <···@ccs.neu.edu> writes:

> Jon Harrop <······@jdh30.plus.com> writes:
>
>> I'm finding prefix notation readable but overly verbose. 
>
> Huh?  Infix notation generally allows only two arguments to an
> operator.
>
>    a + b + c + d + e

Mathematicians write it as:

        ∑ i
 i in {a,b,c,d,e}

or:

        4
        ∑  x_i
       i=0


>          vs.
>
>    (+ a b c d e)

(defun ∑ (&rest args) (apply (function +) args))
(∑ a b c d e)

-- 
__Pascal Bourguignon__                     http://www.informatimago.com/
You never feed me.
Perhaps I'll sleep on your face.
That will sure show you.
From: Christophe Rhodes
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <sqmzngcx2v.fsf@cam.ac.uk>
Pascal Bourguignon <····@mouse-potato.com> writes:

> (defun ∑ (&rest args) (apply (function +) args))
> (∑ a b c d e)

I can't actually read this, thanks to an ill-timed X server upgrade,
but along the same vein is
<http://www-jcsu.jesus.cam.ac.uk/~csr21/hmm.lisp> -- there is a Σ
(forgive me if that doesn't make it through...) macro which helps to
mirror the cited paper's notation quite closely.

Christophe
From: Christophe Rhodes
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <sq8xz1by4s.fsf@cam.ac.uk>
Jon Harrop <······@jdh30.plus.com> writes:

> Christophe Rhodes wrote:
>> Jon Harrop <······@jdh30.plus.com> writes:
>>> Is Lisp code not made less maintainable because of all those brackets?
>> 
>> No, it is made more maintainable because of all those brackets,
>> because it is straightforward to write tools which can manipulate the
>> textual representation of your program, and because human programmers
>> do not read the brackets.
>
> I don't think that makes sense. Continuing that line of thinking, Whitespace
> and Brainf*** are the most maintainable languages.

Did you read just the first half of my sentence?  Human programmers do
not read the brackets, but instead read the indentation.

> Consider the example:
>
> (defun fib (x)
>           (if (<= x 2)
>               1 
>               (+ (fib (- x 2))(fib (1- x)))))

This is not production-quality maintainable lisp code, because it
ignores the conventions that have developed and are supported by the
tools.  However, if I take your mangled example, place it in a lisp
buffer and ask my editor to reformat it (C-M-q in emacs, for
instance), I get

(defun fib (x)                               
  (if (<= x 2)                       
      1                              
      (+ (fib (- x 2))(fib (1- x)))))

and the code obeys more of the conventions: it is much easier to read.
(Still not perfect, because there should be a space between the "))"
and the "(", but good enough).

If you are counting or reading brackets manually when writing lisp
code, you are doing something wrong.

Christophe
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43026d85$0$17486$ed2e19e4@ptn-nntp-reader04.plus.net>
Christophe Rhodes wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> I don't think that makes sense. Continuing that line of thinking,
>> Whitespace and Brainf*** are the most maintainable languages.
> 
> Did you read just the first half of my sentence?  Human programmers do
> not read the brackets, but instead read the indentation.

If every bracket in Lisp were indented separately (i.e. on a separate line),
as braces are typically in C/C++/Java then I would agree. However, they are
not. In this case you have 12 brackets nested 3 levels deep on a single
line, where indentation cannot help.

>> Consider the example:
>>
>> (defun fib (x)
>>           (if (<= x 2)
>>               1
>>               (+ (fib (- x 2))(fib (1- x)))))
> 
> This is not production-quality maintainable lisp code, because it
> ignores the conventions that have developed and are supported by the
> tools.  However, if I take your mangled example, place it in a lisp
> buffer and ask my editor to reformat it (C-M-q in emacs, for
> instance), I get
> 
> (defun fib (x)
>   (if (<= x 2)
>       1
>       (+ (fib (- x 2))(fib (1- x)))))
> 
> and the code obeys more of the conventions: it is much easier to read.

Can I just clarify - the only difference is the constant indentation on the
lines after defun? Unless I'm missing something, I can't see how this makes
it much easier to read.

> (Still not perfect, because there should be a space between the "))"
> and the "(", but good enough).

Yes, I would have expected that.

> If you are counting or reading brackets manually when writing lisp
> code, you are doing something wrong.

Surely if your constructs require ((...)) then you must count brackets to
ensure that there are two sets of nested brackets?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Thomas A. Russ
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <ymi8xz1fl14.fsf@sevak.isi.edu>
Jon Harrop <······@jdh30.plus.com> writes:

> 
> Christophe Rhodes wrote:
> >> Consider the example:
> >>
> >> (defun fib (x)
> >>           (if (<= x 2)
> >>               1
> >>               (+ (fib (- x 2))(fib (1- x)))))
> > 
> > This is not production-quality maintainable lisp code, because it
> > ignores the conventions that have developed and are supported by the
> > tools.  However, if I take your mangled example, place it in a lisp
> > buffer and ask my editor to reformat it (C-M-q in emacs, for
> > instance), I get
> > 
> > (defun fib (x)
> >   (if (<= x 2)
> >       1
> >       (+ (fib (- x 2))(fib (1- x)))))
> > 
> > and the code obeys more of the conventions: it is much easier to read.
> 
> Can I just clarify - the only difference is the constant indentation on the
> lines after defun? Unless I'm missing something, I can't see how this makes
> it much easier to read.

It actually does, because of convention.  The alignment of the body of
the function is more "natural" to an experienced lisp programmer.

Seeing the function body closer to the left edge makes a difference,
precisely because one gets used to reading the whitespace/indentation
instead of counting the parentheses.

Actually, my preferred format for that example would use one additional
line:

(defun fib (x)
  (if (<= x 2)
      1
      (+ (fib (- x 2))
         (fib (1- x)))))

and thus more clearly allows quick identification of the two arguments
to the + function.


> Surely if your constructs require ((...)) then you must count brackets to
> ensure that there are two sets of nested brackets?

Only at the beginning.  At the end, your Lisp-aware editor kindly
matches the parentheses for you.  It is, in fact, the same with C-style
languages where you have all of those {} pairs to match up when you
combine loops, conditionals and function bodies.  But in some ways it
gets a bit harder BECAUSE of the separate line convention, since it is
more likely that the matching part is off screen.

-- 
Thomas A. Russ,  USC/Information Sciences Institute
From: Förster vom Silberwald
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124271328.648716.68980@g47g2000cwa.googlegroups.com>
Jon Harrop wrote:

> Surely if your constructs require ((...)) then you must count brackets to
> ensure that there are two sets of nested brackets?

Even when I was a beginner in Scheme I never counted
parentheses/brackets. Let me kindly ask: why the hell do you want to
count brackets? Heh? Emacs or your favorite Lisp/Scheme editor does
your work for you when formatting and indenting.

Schneewittchen
From: Christophe Rhodes
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <sqvf259ihf.fsf@cam.ac.uk>
Jon Harrop <······@jdh30.plus.com> writes:

> Christophe Rhodes wrote:
>> Jon Harrop <······@jdh30.plus.com> writes:
>> Did you read just the first half of my sentence?  Human programmers do
>> not read the brackets, but instead read the indentation.
>
> If every bracket in Lisp were indented separately (i.e. on a separate line),
> as braces are typically in C/C++/Java then I would agree. However, they are
> not. In this case you have 12 brackets nested 3 levels deep on a single
> line, where indentation cannot help.

The line (+ (fib (- x 2)) (fib (1- x))) is approximately maximally
complex in terms of nesting: any more would definitely be frowned
upon, and indeed I would prefer to see
  (+ (fib (- x 2))
     (fib (1- x))) 
even in such "simple" cases.

>> If you are counting or reading brackets manually when writing lisp
>> code, you are doing something wrong.
>
> Surely if your constructs require ((...)) then you must count brackets to
> ensure that there are two sets of nested brackets?

You must trivially count opening parens as you type them, yes.  You
will never have to count closing parens: if you are composing this
code, your editor will tell you what paren you have just closed; if
you are editing this code later, the wrong number of closing parens
will announce itself by the indentation of the rest of the code being
wrong.

Christophe
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m31x4sbmmp.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:


> > If you are counting or reading brackets manually when writing lisp
> > code, you are doing something wrong.
> 
> Surely if your constructs require ((...)) then you must count brackets to
> ensure that there are two sets of nested brackets?

Absolutely not.  That is what the IDE/editor is doing for you.  You
_never_ think about (let alone _count_) the parens.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: David Golden
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <vfjNe.4100$R5.562@news.indigo.ie>
jayessay wrote:

> Absolutely not.  That is what the IDE/editor is doing for you.  You
> _never_ think about (let alone _count_) the parens.
> 

And even if you ARE counting parens, because you are writing lisp in
notepad or something, it's not like they're hard to count:  
1+ for #\( and 1- for #\) ...
From: Brian Downing
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <HSqMe.274342$xm3.216108@attbi_s21>
In article <·························@ptn-nntp-reader03.plus.net>,
Jon Harrop  <······@jdh30.plus.com> wrote:
> Consider the example:
> 
> (defun fib (x)
>           (if (<= x 2)
>               1 
>               (+ (fib (- x 2))(fib (1- x)))))
> 
> In ML this is:
> 
> let fib x = if x<=2 then 1 else fib(x-2) + fib(x-1)
> 
> That may be easier to parse for the machine (I don't think it is
> though) but maintainability is about how easily a human can parse it.

Here's your example reformatted correctly:

(defun fib (x)
  (if (<= x 2)
      1
      (+ (fib (- x 2)) (fib (1- x)))))

Now consider the following transformations:

(defun fib (x)
  (if (<= x 2)
      (+ (fib (- x 2)) (fib (1- x)))
      1))

(defun fib (x)
  (+ (fib (- x 2)) (fib (1- x))))

I can perform both of those with one command (both three keystrokes in
this case) in Emacs, and without marking any extents at all.  How many
keys must you hit in your editor to perform the equivalents, and does
your editor understand enough of the syntax to figure out what the
extents of the if and else cases are without you explicitly marking
them? 

(If this seems trivial, which it is in this case, consider that it can
work on any size and complexity of expression.)

Also, I consider the reformatted Lisp to be more readable than the ML,
but then, it's what I'm used to.

-bcd
-- 
*** Brian Downing <bdowning at lavos dot net> 
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124221304.384081.108530@g49g2000cwa.googlegroups.com>
Brian Downing wrote:
> In article <·························@ptn-nntp-reader03.plus.net>,
> Jon Harrop  <······@jdh30.plus.com> wrote:
> > Consider the example:
> >
> > (defun fib (x)
> >           (if (<= x 2)
> >               1
> >               (+ (fib (- x 2))(fib (1- x)))))
> >
> > In ML this is:
> >
> > let fib x = if x<=2 then 1 else fib(x-2) + fib(x-1)
> >
> > That may be easier to parse for the machine (I don't think it is
> > though) but maintainability is about how easily a human can parse it.
>
> Here's your example reformatted correctly:
>
> (defun fib (x)
>   (if (<= x 2)
>       1
>       (+ (fib (- x 2)) (fib (1- x)))))
>
<snip>

For what it's worth the code first shown would be correctly formatted
were it Scheme.  I think this is where the misunderstanding comes from.
From: Peter Seibel
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m28xz1y3r7.fsf@gigamonkeys.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Christophe Rhodes wrote:
>> Jon Harrop <······@jdh30.plus.com> writes:
>>> Is Lisp code not made less maintainable because of all those brackets?
>> 
>> No, it is made more maintainable because of all those brackets,
>> because it is straightforward to write tools which can manipulate the
>> textual representation of your program, and because human programmers
>> do not read the brackets.
>
> I don't think that makes sense. Continuing that line of thinking, Whitespace
> and Brainf*** are the most maintainable languages.
>
> Consider the example:
>
> (defun fib (x)
>           (if (<= x 2)
>               1 
>               (+ (fib (- x 2))(fib (1- x)))))
>
> In ML this is:
>
> let fib x = if x<=2 then 1 else fib(x-2) + fib(x-1)
>
> That may be easier to parse for the machine (I don't think it is though) but
> maintainability is about how easily a human can parse it.

Hmmm. I think I'm with Joe Marshall--this is subjective. When I look
at the ML I immediately get a little worried thought bubbling up
saying, crap, a string of undelimited tokens; now I have to start
thinking about precedence rules. In this simple case it's not a big
problem because I assume the precedence rules are sane and that the ML
is not equivalent to say:

  (defun fib (x)
    (+ (if (<= x 2) 1 (fib (-x 2))) (fib (- x 1))))

But the fact that I have to think about that just makes me wish for
some easy to understand and maintain Lisp code. ;-)

-Peter

P.S. Which is not to say that the mental effort of dealing with
precedence rules is overwhelming--just that it's another cost that (to
me) is higher than the cost (if any) of dealing with parens (which
also come with lots of other benefits, as others in this thread have
pointed out.)

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Hartmann Schaffer
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <7IzMe.1688$Dd.7198@newscontent-01.sprint.ca>
Peter Seibel wrote:
> ...
> P.S. Which is not to say that the mental effort of dealing with
> precedence rules is overwhelming

i am not so sure about that.  sure, as long as you have only a few 
operators and only a few precedence levels, it's fine.  but you only 
have to look at typical C code to see that quite a few programmers feel 
uncertain about C's precedence rules.  you very often see unnecessary 
parentheses.  i am not quite sure whether it is because the writers feel 
uncertain about the precedence rules or whether they fear that later 
maintainers are overwhelmed

hs
From: George Neuner
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <e6k8g1d5qava0j4s22bigkl5qurs6m8pg9@4ax.com>
On Wed, 17 Aug 2005 01:13:45 -0400, Hartmann Schaffer
<··@hartmann.schaffernet> wrote:

>Peter Seibel wrote:
>> ...
>> P.S. Which is not to say that the mental effort of dealing with
>> precedence rules is overwhelming
>
>i am not so sure about that.  sure, as long as you have only a few 
>operators and only a few precedence levels, it's fine.  but you only 
>have to look at typical C code to see that quite a few programmers feel 
>uncertain about C's precedence rules.  you very often see unnecessary 
>parentheses.  i am not quite sure whether it is because the writers feel 
>uncertain about the precedence rules or whether they fear that later 
>maintainers are overwhelmed

C has 45 operators grouped into 17 precedence levels.  23 operators
associate left, 20 associate right, and 2 can go either way (at
different precedence levels, naturally).

Most good C/C++ programmers can recall the operator rules if they stop
to think about them.  But I think most programmers *don't* think about
them while coding - they just parenthesize if they are the least bit
uncertain or if thinking about how to do it takes "too long".

For myself, I define taking "too long" as consciously realizing that
I'm thinking about it.  As a data point I don't know how useful that
is ... I've coded professionally in C++ for 12 years, and in C for
several years prior to that.  When I find myself thinking about
precedence or associativity, it's a very good indication that the
expression I'm working on is far too damn complex.

George
--
for email reply remove "/" from address
From: Stefan Nobis
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <87mznhu0u7.fsf@snobis.de>
Jon Harrop <······@jdh30.plus.com> writes:

> (defun fib (x)
>           (if (<= x 2)
>               1 
>               (+ (fib (- x 2))(fib (1- x)))))

> In ML this is:

> let fib x = if x<=2 then 1 else fib(x-2) + fib(x-1)

The ML code is not easier to read because there are fewer
parens. In fact, I don't read the parens in Lisp (even some weeks
ago when I started learning Lisp) -- I really had to get used to
the prefix notation (reading and writing). This is also what makes
the ML code easier to read for someone who isn't trained in prefix
notation. But IMHO prefix notation is not very hard to read or to
get used to and it's no maintenance problem either.

-- 
Stefan.
From: Förster vom Silberwald
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124271123.128581.262740@g44g2000cwa.googlegroups.com>
Jon Harrop wrote:

> Consider the example:
>
> (defun fib (x)
>           (if (<= x 2)
>               1
>               (+ (fib (- x 2))(fib (1- x)))))
>
> In ML this is:
>
> let fib x = if x<=2 then 1 else fib(x-2) + fib(x-1)

The first one is easier to read.

I noticed in your code snippets (not only here) that you are sometimes
after very packed code structures. Your programming style of one-liners
is maybe good for writing down code quickly in an afternoon. However,
are you sure that one-liners are predestined for re-using code in the
long run?

Schneewittchen
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mg9nhF15mgq3U5@news.dfncis.de>
Jon Harrop <······@jdh30.plus.com> wrote:

>(defun fib (x)
>          (if (<= x 2)
>              1 
>              (+ (fib (- x 2))(fib (1- x)))))
>
>In ML this is:
>let fib x = if x<=2 then 1 else fib(x-2) + fib(x-1)
>
>That may be easier to parse for the machine (I don't think it is though) but
>maintainability is about how easily a human can parse it.

Lisp simply trades in human readability for macros; you decide
whether it's worth it. Some people argue that s-exps are actually
more readable (well, I guess if you see nothing else for years or
even decades, that might actually be true at some time) whereas
others prefer to have compilers generate the parse trees for them.
However, you lose the power of macrology in the latter case. Then
again, while macros are certainly cool, I've never felt the need
for them when writing SML programs... I don't know why, but maybe
it's because the syntax is already there.

mkb.
From: Jens Axel Søgaard
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4302ffea$0$25939$edfadb0f@dread12.news.tele.dk>
Matthias Buelow wrote:

 > Then
> again, while macros are certainly cool, I've never felt the need
> for them when writing SML programs... I don't know why, but maybe
> it's because the syntax is already there.

I missed them the other day. For each of the base types Int8.int,
Int16.int, Int32.int, Int64.int, Real32.real, Real64.real, Word8.word,
Word16.word, Word32.word, and Word64.word, and for each of the
constructors ref, vector and array, I had to define three functions
(one to register a, say, Int8.int vector with the garbage collector,
one to look up a previously registered vector/array/reference cell, and
one to unregister a root), which subsequently were to be exported by the
FFI. In total I needed to define and export 90 functions.

I bailed out and wrote a Scheme programme that generated the functions,
but if SML had macros that wouldn't have been necessary.

-- 
Jens Axel Søgaard
From: Alain Picard
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <87u0hozygw.fsf@memetrics.com>
Matthias Buelow <···@incubus.de> writes:

> Lisp simply trades in human readability for macros; 

Readability is a function of expertise.  This is true
in all languages, computer or human languages.

Lisp is, quite honestly, by FAR the most readable
computer language I have ever worked with.  Interestingly
enough, the next most readable language is probably FORTH.
Maybe I just owned an HP calculator too early in my life...   
From: David Golden
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <8HhNe.4095$R5.765@news.indigo.ie>
Matthias Buelow wrote:


> Lisp simply trades in human readability for macros;  you decide 
> whether it's worth it.

Speak for yourself. Humans vary. I find languages like lisp and forth
easier to read. Apparently some other people's brains just work
differently, to the extent that I wonder if programmers' brains can be
sorted into two basic sorts based on preference for simplicity or
complexity of syntax.

> Some people argue that s-exps are actually
> more readable (well, I guess if you see nothing else for years or
> even decades, that might actually be true at some time)

Obviously, it'll be easier for someone who knows some lisp to read
lisp...
From: Hannah Schroeter
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <de4rta$u2l$1@c3po.use.schlund.de>
Hello!

David Golden  <············@oceanfree.net> wrote:
>Matthias Buelow wrote:

>> Lisp simply trades in human readability for macros;  you decide 
>> whether it's worth it.

>Speak for yourself. Humans vary. I find languages like lisp and forth
>easier to read. Apparently some other people's brains just work
>differently, to the extent that I wonder if programmers' brains can be
>sorted into two basic sorts based on preference for simplicity or
>complexity of syntax.

I don't think so. I like Lisp *and* I like Haskell and SML.

And I'm in a hate-love relationship to C++ :-/

>[...]

Kind regards,

Hannah.
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4305f5b3$0$17470$ed2e19e4@ptn-nntp-reader04.plus.net>
Hannah Schroeter wrote:
> And I'm in a hate-love relationship to C++ :-/

That's interesting. What is there to love about C++?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: David Golden
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <S6nNe.4125$R5.795@news.indigo.ie>
Jon Harrop wrote:

> Hannah Schroeter wrote:
>> And I'm in a hate-love relationship to C++ :-/
> 
> That's interesting. What is there to love about C++?
> 
Well, "C++ template metaprogramming" is very slightly like 
macro writing in lisp, only in a world filled with pain.

So maybe if you're a lisper and somewhat masochistic, 
C++ has some appeal.  Or if you're an ordinary lisper,
you may hate writing in C++ but be (relative to
ordinary C++ programmers) very good at template 
metaprogramming constructs none the less, thus leading to a 
"hate-love" relationship, where C++ loves you but you hate it.
From: Hannah Schroeter
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <dgmnsd$91p$1@c3po.use.schlund.de>
Hello!

Jon Harrop  <······@jdh30.plus.com> wrote:
>Hannah Schroeter wrote:
>> And I'm in a hate-love relationship to C++ :-/

>That's interesting. What is there to love about C++?

I think I don't need to explain the "hate" part. Ok, a few words:
lack of automatic memory management, lack of real lexical closures,
lack of real macros.

So... It spares me quite some typing compared to plain C, which would
probably the only alternative offered to me at this workplace.

This is achieved both by the C++ standard library, as well as the
additional abstraction capabilities.

Kind regards,

Hannah.
From: David Golden
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <rPmNe.4123$R5.805@news.indigo.ie>
Hannah Schroeter wrote:
> 
> I don't think so. I like Lisp *and* I like Haskell and SML.
> 
Ah, well, maybe you have two brains... :-)

Of course, liking Lisp and Haskell and SML isn't the same as liking
lisp syntax and haskell syntax and sml syntax (in context I guess
the latter may still be true of you, but I'm just making a point here:)
One could be fond of haskell *despite* the syntax - it has some
interesting features, after all, that could mean on balance one likes
it as a whole...
From: Hannah Schroeter
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <dgmorn$r39$1@c3po.use.schlund.de>
Hello!

David Golden  <············@oceanfree.net> wrote:
>Hannah Schroeter wrote:

>> I don't think so. I like Lisp *and* I like Haskell and SML.

>Ah, well, maybe you have two brains... :-)

Might be ;-)

>Of course, liking Lisp and Haskell and SML isn't the same as liking
>lisp syntax and haskell syntax and sml syntax (in context I guess
>the latter may still be true of you, but I'm just making a point here:)
>One could be fond of haskell *despite* the syntax - it has some
>interesting features, after all, that could mean on balance one likes
>it as a whole...

In fact I really don't dislike Haskell's or SML's concrete syntax
either. Seems I'm more flexible towards different kinds of concrete
syntax than some other people.

Wrt language concepts, I'd like something like Common Lisp, with
very lightweight Erlang style concurrency added on top of it.
(No, 1:1 native threads don't count, they're too heavyweight,
just reckon that each thread needs a stack of at least one page,
and a kernel stack of at least one page, this is min. 8k per thread,
in fact, OpenBSD uses 2 upages on i386, i.e. it's min. 12k, dunno
where to find the relevant info in the Linux kernel sources,
for comparison, Erlang processes cost less than 1k each, dito GHC's
forkIO threads; so I think for real scalability you need 1:n or m:n
thread implementations; and I also mean the message passing part of
"Erlang style").

And I'd like just any CL implementation to run again on OpenBSD...

Kind regards,

Hannah.
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3p8qfcF98i4qU2@individual.net>
Hannah Schroeter wrote:
> Wrt language concepts, I'd like something like Common Lisp, with
> very lightweight Erlang style concurrency added on top of it.

Same here...

> (No, 1:1 native threads don't count, they're too heavyweight,

The problem is that multi-processing is important, especially for 
scalability, and it's quite hard to get M:N thread models running (and 
getting them to run faster than something like OS threads on Linux).

If anyone would implement threads with the advantages of both user- and 
kernel-threading, I'd love that.  Languages could easily include that.

-- 
My mouth says the words, my brain is thinking monstertrucks.
Joey (Friends)
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <432f5102$0$97107$ed2619ec@ptn-nntp-reader03.plus.net>
Ulrich Hobelmann wrote:
> The problem is that multi-processing is important,

Currently only for a tiny minority of applications. This may change in the
near future but I'm not yet convinced.

> especially for 
> scalability, and it's quite hard to get M:N thread models running (and
> getting them to run faster than something like OS threads on Linux).
> 
> If anyone would implement threads with the advantages of both user- and
> kernel-threading, I'd love that.  Languages could easily include that.

OCaml is totally thread unsafe. My impression is that writing a high
performance GC is very hard but, combined with concurrency, it is currently
practically impossible.

I'm more than happy to embrace new languages but I would not be willing to
sacrifice a significant amount of performance for a concurrent GC when I
only have one non-uniprocessor machine. Single processor performance is far
more important to me.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Robert Strandh
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <6wpsr45yvb.fsf@serveur5.labri.fr>
Jon Harrop <······@jdh30.plus.com> writes:

> My impression is that writing a high
> performance GC is very hard but, combined with concurrency, it is currently
> practically impossible.

I doubt that.  See for instance:

  http://portal.acm.org/citation.cfm?id=174673
-- 
Robert Strandh
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <432f75e8$0$17492$ed2e19e4@ptn-nntp-reader04.plus.net>
Robert Strandh wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> My impression is that writing a high
>> performance GC is very hard but, combined with concurrency, it is
>> currently practically impossible.
> 
> I doubt that.  See for instance:
> 
>   http://portal.acm.org/citation.cfm?id=174673

You'll notice that they (the authors of OCaml) have since evolved to the
non-concurrent GC that I mentioned. :-)

See their more recent ramblings:

"... a concurrent GC avoids this problem, but adds tremendous complexity ...
Why was Concurrent Caml Light abandoned?  Too complex; too hard to debug
(despite the existence of a machine-checked proof of correctness)" - Xavier
Leroy
http://caml.inria.fr/pub/ml-archives/caml-list/2002/11/64c14acb90cb14bedb2cacb73338fb15.en.html

I have no experience of writing GCs, let alone concurrent GCs, but from what
I've read it is very difficult to write a working concurrent GC with
reasonable performance, let alone one that gets anywhere near to the
performance of a good non-concurrent GC (like the current OCamls).

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3p9tt6F9edp8U1@individual.net>
Jon Harrop wrote:
> Ulrich Hobelmann wrote:
>> The problem is that multi-processing is important,
> 
> Currently only for a tiny minority of applications. This may change in the
> near future but I'm not yet convinced.

Well, multicore machines are starting to be widely available, and it 
would be a waste to have every program only use one core at a time.

Also, maybe every server application of significant size needs to be 
distributed or at least multi-CPU.  Load-balancing isn't everything.

Of course, on the desktop, or in the small web-apps area things are 
different.

-- 
My mouth says the words, my brain is thinking monstertrucks.
Joey (Friends)
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <wtlbgwim.fsf@alum.mit.edu>
Jon Harrop <······@jdh30.plus.com> writes:

> My impression is that writing a high performance GC is very hard
> but, combined with concurrency, it is currently practically
> impossible.

Many people have been experimenting with high-performance, mostly
concurrent GC.  IBM and Sun are two of the main places.
 
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4330a0c9$0$1294$ed2619ec@ptn-nntp-reader02.plus.net>
Joe Marshall wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> My impression is that writing a high performance GC is very hard
>> but, combined with concurrency, it is currently practically
>> impossible.
> 
> Many people have been experimenting with high-performance, mostly
> concurrent GC.  IBM and Sun are two of the main places.

What have their results been like? Is Sun's Java representative of their
high-performance GC?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <oe6ngufz.fsf@alum.mit.edu>
Jon Harrop <······@jdh30.plus.com> writes:

> Joe Marshall wrote:
>> Jon Harrop <······@jdh30.plus.com> writes:
>>> My impression is that writing a high performance GC is very hard
>>> but, combined with concurrency, it is currently practically
>>> impossible.
>> 
>> Many people have been experimenting with high-performance, mostly
>> concurrent GC.  IBM and Sun are two of the main places.
>
> What have their results been like? Is Sun's Java representative of their
> high-performance GC?

I don't know about Sun, but it looks like IBM's stuff has been real
interesting.  Check out the Metronome project at IBM.
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4330a827$0$1294$ed2619ec@ptn-nntp-reader02.plus.net>
Joe Marshall wrote:
> I don't know about Sun, but it looks like IBM's stuff has been real
> interesting.  Check out the Metronome project at IBM.

Metronome certainly sounds very impressive but I can't find any benchmarks
giving quantitative results.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: André Thieme
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <de3bgt$iop$1@ulric.tng.de>
Jon Harrop schrieb:
> Christophe Rhodes wrote:
> 
>>Jon Harrop <······@jdh30.plus.com> writes:
>>
>>>Is Lisp code not made less maintainable because of all those brackets?
>>
>>No, it is made more maintainable because of all those brackets,
>>because it is straightforward to write tools which can manipulate the
>>textual representation of your program, and because human programmers
>>do not read the brackets.
> 
> 
> I don't think that makes sense. Continuing that line of thinking, Whitespace
> and Brainf*** are the most maintainable languages.
> 
> Consider the example:
> 
> (defun fib (x)
>           (if (<= x 2)
>               1 
>               (+ (fib (- x 2))(fib (1- x)))))
> 
> In ML this is:
> 
> let fib x = if x<=2 then 1 else fib(x-2) + fib(x-1)
> 
> That may be easier to parse for the machine (I don't think it is though) but
> maintainability is about how easily a human can parse it.

What you did was to create artificially an example which serves your 
purpose of showing how much better ML can be read. What you did not do 
was to mention how the pure functional programming style can complicate 
things. What if you need to pass an argument through 12 functions 
because you can't save it anywhere? Is ML then still easier to parse?

How would you implement a Unit Test Framework in ML in 16 lines (not 
counting comments and blank lines?) which can be used for some simple 
programs?
How readable are webservers written in ML? Maybe ML is only useful for 
some specific tiny sets of pure mathematical applications, used in 
universities?


André
-- 
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mlcvdF179p8nU1@news.dfncis.de>
André Thieme <······························@justmail.de> wrote:

>What you did was to create artificially an example which serves your 
>purpose of showing how much better ML can be read. What you did not do 
>was to mention how the pure functional programming style can complicate 
>things. What if you need to pass an argument through 12 functions 
>because you can't save it anywhere? Is ML then still easier to parse?

ML is not pure functional.

>How readable are webservers written in ML? Maybe ML is only useful for 
>some specific tiny sets of pure mathematical applications, used in 
>universities?

Maybe you don't have a clue?

mkb.
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mlm3kF17alavU1@individual.net>
André Thieme wrote:
> What you did was to create artificially an example which serves your 
> purpose of showing how much better ML can be read. What you did not do 
> was to mention how the pure functional programming style can complicate 
> things. What if you need to pass an argument through 12 functions 
> because you can't save it anywhere? Is ML then still easier to parse?

The parser doesn't care how many parameters a given function has.

And what does this have to do with pureness?  Do you want the great 
alternative of storing all kinds of values in global variables?

Actually in Lisp you can do that in a clean way, using special 
variables.  IMHO this isn't any less pure than pure FP, because it still 
avoids all destructive updates.

> How would you implement an Unit Test Framework in ML in 16 lines (not 
> counting comments and blank lines?) which can be used for some simple 
> programs?

Sure, that's where it gets hairy.

> How readable are webservers written in ML? Maybe ML is only useful for 
> some specific tiny sets of pure mathematical applications, used in 
> universities?

I think the worst about ML is its not too indentation-friendly syntax. 
At times I thought about implementing an sexp->ML translator, but just 
stopped using ML instead.  The other thing to point out is the 
not-too-great documentation, but all open source suffers from that, IMHO.

ML is used outside of universities, though probably less for web 
services, communication etc. than Java and C.

-- 
I believe in Karma.  That means I can do bad things to people
all day long and I assume they deserve it.
	Dogbert
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4305bdd5$0$22906$ed2619ec@ptn-nntp-reader01.plus.net>
André Thieme wrote:
> What you did was to create artificially an example which serves your
> purpose of showing how much better ML can be read.

The same argument applies to basically all functions that I want to write.

> What you did not do 
> was to mention how the pure functional programming style can complicate
> things.

If an imperative style is better, then use an imperative style. This can be
done directly in ML or indirectly using monads in Haskell.

> What if you need to pass an argument through 12 functions 
> because you can't save it anywhere? Is ML then still easier to parse?

let f a b c d e f g h i j k l =
  let f1 = ... in
  let f2 = ... in

That seems pretty simple to me. If you would resort to an imperative style
in Lisp simply because it cannot express this so easily then I would say
that is another bullet in Lisp's syntax. However, my guess is that seasoned
Lisp programmers would not write this the way you would...

> How would you implement an Unit Test Framework in ML in 16 lines (not
> counting comments and blank lines?) which can be used for some simple
> programs?

Can you describe what that is?

> How readable are webservers written in ML?

In OCaml, very readable. Same goes for DNS, SSH and so forth. Judging by the
results of the ray tracer, it would be extremely difficult to write an SSH
server in Lisp which had comparable performance.

> Maybe ML is only useful for 
> some specific tiny sets of pure mathematical applications, used in
> universities?

Given that my company is using OCaml for scientific computing and
visualisation applications in industry, I think we can safely say that your
assertion is definitely wrong.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: ··············@hotmail.com
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124460272.758739.324890@g44g2000cwa.googlegroups.com>
Jon Harrop wrote:
> André Thieme wrote:
> > What you did was to create artificially an example which serves your
> > purpose of showing how much better ML can be read.
>
> The same argument applies to basically all functions that I want to write.
>
...
>
> let f a b c d e f g h i j k l =
>   let f1 = ... in
>   let f2 = ... in
>
> That seems pretty simple to me. If you would resort to an imperative style
> in Lisp simply because it cannot express this so easily then I would say
> that is another bullet in Lisp's syntax. However, my guess is that seasoned
> Lisp programmers would not write this the way you would...
...

I don't understand what you are trying to achieve by your chattering on
comp.lang.lisp.

You are evidently unwilling to actually spend time and mental effort to
learn Lisp enough to become fluent enough to figure these comparisons
out for yourself, but are quite willing to respond to offered
explanations with gratuitous criticism of Lisp syntax and macros
apparently without having an informed opinion on either.

Perhaps you expect the denizens of comp.lang.lisp to become expert
enough in ML to offer you a personal tutorial in response to these
barbs? That seems to me contrary to all human nature.

If you wish to learn Common Lisp, there are several excellent and
modern books available, even available for download.

Lisp programmers are quite satisfied with the syntax they have. Some of
the smartest programmers in the world have had 50 years to improve it,
and they've left it pretty much as it is. Do we really need to offer
more defense than that to every know-nothing complainer who wanders by?

> Given that my company is using OCaml for scientific computing and
> visualisation applications in industry, I think we can safely say that your
> assertion is definitely wrong.

If your company can successfully use OCaml to make money, that's just
great. Why don't you go do some programming in OCaml to earn your
paycheck and leave the rest of us alone?
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4305f9ff$0$17470$ed2e19e4@ptn-nntp-reader04.plus.net>
··············@hotmail.com wrote:
> Jon Harrop wrote:
>> André Thieme wrote:
>> > What you did was to create artificially an example which serves your
>> > purpose of showing how much better ML can be read.
>>
>> The same argument applies to basically all functions that I want to
>> write.
>>
> ...
>>
>> let f a b c d e f g h i j k l =
>>   let f1 = ... in
>>   let f2 = ... in
>>
>> That seems pretty simple to me. If you would resort to an imperative
>> style in Lisp simply because it cannot express this so easily then I
>> would say that is another bullet in Lisp's syntax. However, my guess is
>> that seasoned Lisp programmers would not write this the way you would...
> ...
> 
> I don't understand what you are trying to achieve by your chattering on
> comp.lang.lisp.

I am trying to find out what Lisp programmers think of the alternatives and
what motivates them to continue using Lisp.

> You are evidently unwilling to actually spend time and mental effort to
> learn Lisp enough to become fluent enough to figure these comparisons
> out for yourself, but are quite willing to respond to offered
> explanations with gratuitous criticism of Lisp syntax and macros
> apparently without having an informed opinion on either.

If you believe I have said something wrong then I would appreciate it if you
could point out what, specifically.

> Perhaps you expect the denizens of comp.lang.lisp to become expert
> enough in ML to offer you a personal tutorial in response to these
> barbs? That seems to me contrary to all human nature.

I also find it interesting to see the kinds of responses that questions get
in forums. The c.l.java.programming crowd were mostly unhelpful. The
c.l.functional crowd were mostly helpful. This newsgroup is partway between
the two. That is useful to know if people are considering trying to learn
and use Lisp.

> If you wish to learn Common Lisp, there are several excellent and
> modern books available, even available for download.

I'm trying to decide if it is worth learning Lisp so I am more interested in
learning why Lisp programmers chose Lisp and seeing how a seasoned Lisp
programmer can translate my ray tracer, for example. Why do you use Lisp?

Obviously I cannot do a good enough job of the conversion myself to make for
a fair comparison with the other languages (that I am much more familiar
with). It is also interesting to study the differences between the 8 Lisp
and 13 Scheme translations that have been done.

So now I'd like to know how Lisp is more expressive than the alternatives.
What practical problems are easier to write in Lisp than in other
languages? Macros seem to be a strong point here. Lisp's macros are more
powerful than OCaml's and Lisp programs are likely to be faster than
Mathematica programs. So there is one niche. Are there others?

> Lisp programmers are quite satisfied with the syntax they have. Some of
> the smartest programmers in the world have had 50 years to improve it,
> and they've left it pretty much as it is. Do we really need to offer
> more defense than that to every know-nothing complainer who wanders by?

We already addressed that point.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: ··············@hotmail.com
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124468908.888717.85390@f14g2000cwb.googlegroups.com>
Jon Harrop wrote:

> I am trying to find out what Lisp programmers think of the alternatives and
> what motivates them to continue using Lisp.
>

There are as many motivations for using Lisp as there are Lisp
programmers. It seems like a silly question to me, on par with asking a
mathematician what his motivation is for using logarithms.
Most people don't spend their time considering every possible
alternative. They use what they know, what is handy, and what seems
appropriate to the task. They don't try to pick some global optimum
every time they set to work.

>
> If you believe I have said something wrong then I would appreciate it if you
> could point out what, specifically.

You have shown no evidence of trying to learn and use Lisp. You have
shown evidence for believing OCaml to be superior, without indicating
you have given equal effort and gained equal experience in both Lisp
and OCaml before making that judgment.

Part of learning and using a computer language, just as with a human
language, is to understand and adopt the idioms and outlook of a
programming community.

Your responses show that you do not understand the concept of Lisp
macros. Your mention of "AST" (abstract syntax trees) and comparisons
to Mathematica, for instance, are not on point. They indicate that you
are using the mental constructs of other programming techniques rather
than the mental constructs appropriate to Lisp macros.  "Macro" is an
unfortunately overloaded word in the computer field, which means very
different things in different environments.

> I'm trying to decide if it is worth learning Lisp so I am more interested in
> learning why Lisp programmers chose Lisp and seeing how a seasoned Lisp
> programmer can translate my ray tracer, for example. Why do you use Lisp?

I use Lisp because it allows me to quickly develop powerful code to
deal with my day-to-day computer chores, ranging from file munging to
mathematical calculations to bit-twiddling. Other folks would use Perl
or Mathematica for the same purpose, but I find Lisp to be more to my
taste. Run-time performance is usually the least of my concerns.

The techniques to get good run-time performance in Lisp are generally
different than other languages, and, as in all optimization, often
depends strongly on the specifics of a platform, compiler vendor, and
problem domain. One approach, for instance, is to use Lisp interfaces
to underlying C, Fortran, or platform-specific assembly-language
libraries. Matlisp is an open-source example that brings LAPACK and
BLAS into the Lisp domain. Lisp macros are an excellent way to smooth
over the rough edges of foreign libraries, and deal with some necessary
translations at compile-time rather than run-time.

> So now I'd like to know how Lisp is more expressive than the alternatives.
> What practical problems are easier to write in Lisp than in other
> languages? Macros seem to be a strong point here. Lisp's macros are more
> powerful than OCaml's and Lisp programs are likely to be faster than
> Mathematica programs. So there is one niche. Are there others?

Lisp macros are not a "niche." They are a technique for writing
computer programs that write computer programs for you. This is a
marvelous technique which provides the Lisp programmer with tremendous
leverage to develop "Lispy" abstractions that address a specific
problem domain while maintaining the general flexibility and power that
the Lisp language usually provides. To give one description that might
make some sense to you, it is a Turing-complete macro system, much like
the C++ template system, but based on the full expressive power of the
Lisp language, and not just a Turing-complete special-purpose syntax.

Lisp macros have been used in the past to develop multiple full-fledged
object oriented extensions to the basic Lisp language, for instance,
without having to change the Lisp language syntax in the slightest.
That's one reason Lisp people spend very little time worrying about
syntax.

>
> > Lisp programmers are quite satisfied with the syntax they have. Some of
> > the smartest programmers in the world have had 50 years to improve it,
> > and they've left it pretty much as it is. Do we really need to offer
> > more defense than that to every know-nothing complainer who wanders by?
>
> We already addressed that point.

And you seem to have missed it.

> [clipped from above]
>
> I also find it interesting to see the kinds of responses that questions get
> in forums. The c.l.java.programming crowd were mostly unhelpful. The
> c.l.functional crowd were mostly helpful. This newsgroup is partway between
> the two. That is useful to know if people are considering trying to learn
> and use Lisp.

Most Lisp programmers are used to the idea that many programmers are
unfamiliar with Lisp, or had a brief encounter marked by poor pedagogy
(based on a 1960's view of the Lisp language), and are willing to go to
some lengths to address questions posed out of honest misconceptions.
However, a belligerent belief by some novice that he can come in and
discuss with us the "weaknesses" of the Lisp language on an equal basis
to a Lisp expert naturally rubs us the wrong way.

In general, your approach exudes arrogance without the corresponding
level of experience to justify it.

To make an analogy, if a guitarist, no matter how talented, passes by a
group of violinists, and starts a discussion along the lines of

"I'm thinking of taking up violin, and I'd like to know how violin
compares to guitar, both pros and cons. Your ability to strum the
strings appears quite limited, because you insist on holding it up to
your neck. And you appear to have ignored all the latest developments
in picks. Can one of you show me how you would play this tablature
arrangement of a Led Zeppelin tune?..."

He should not be surprised to get a range of hostile or dismissive
reactions.

Want to find out whether Lisp is appropriate? Read some tutorials, some
books, try some free-software or commerical trial implementations, and
see how you like it. I.e. do your homework, and pay a little of the
price of admission. If you run into a puzzle, ask a straightforward
question, and you are likely to get a straightforward answer.

Throw down a challenge to the community to prove Lisp's worthiness to
your satisfaction, or ask a question which is based on a hidden agenda
and personal biases, and you are much less likely to get a useful
response.
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43060ff7$0$1300$ed2619ec@ptn-nntp-reader02.plus.net>
··············@hotmail.com wrote:
> Jon Harrop wrote:
>> I am trying to find out what Lisp programmers think of the alternatives
>> and what motivates them to continue using Lisp.
> 
> There are as many motivations for using Lisp as there are Lisp
> programmers. it seems like a silly question to me, on par with asking a
> mathematician what his motivation is for using logarithms.
> Most people don't spend their time considering every possible
> alternative. They use what they know, what is handy, and what seems
> appropriate to the task. They don't try to pick some global optimum
> every time they set to work.

Sure, you have to find a balance between learning new stuff and learning
more detail about stuff you already know. But there are likely to be at
least a few Lisp programmers out there who have studied the alternatives in
detail and who have rational reasons for continuing to use Lisp instead.
I'd like to know what their reasons are.

>> If you believe I have said something wrong then I would appreciate it if
>> you could point out what, specifically.
> 
> You have shown no evidence of trying to learn and use Lisp. You have
> shown evidence for believing OCaml to be superior, without indicating
> you have given equal effort and gained equal experience in both Lisp
> and OCaml before making that judgment.

I don't believe OCaml is superior to Lisp.

> Your responses show that you do not understand the concept of Lisp
> macros. Your mention of "AST" (abstract syntax trees) and comparisons
> to Mathematica, for instance, are not on point.

Several people have said this but none have explained why. Can you explain
why you think this?

> They indicate that you 
> are using the mental constructs of other programming techniques rather
> than the mental constructs appropriate to Lisp macros.  "Macro" is an
> unfortunately overloaded word in the computer field, which means very
> different things in different environments.

Yes. I've yet to find a coherent definition of macro in this context.

>> I'm trying to decide if it is worth learning Lisp so I am more interested
>> in learning why Lisp programmers chose Lisp and seeing how a seasoned
>> Lisp programmer can translate my ray tracer, for example. Why do you use
>> Lisp?
> 
> I use Lisp because it allows me to quickly develop powerful code to
> deal with my day-to-day computer chores, ranging from file munging to
> mathematical calculations to bit-twiddling. Other folks would use Perl
> or Mathematica for the same purpose, but I find Lisp to be more to my
> taste. Run-time performance is usually the least of my concerns.

Ok, thanks for the info.

> The techniques to get good run-time performance in Lisp are generally
> different than other languages, and, as in all optimization, often
> depends strongly on the specifics of a platform, compiler vendor, and
> problem domain.

It seems to be quite similar to ML actually. In OCaml, polymorphism incurs a
run-time performance penalty. So you might want to remove that generality
just as you might want to specialise arithmetic in Lisp. In Lisp, you want
to remove run-time type checking. In ML, the equivalent is to flatten
unnecessary variant types.

> One approach, for instance, is to use Lisp interfaces 
> to underlying C, Fortran, or platform-specific assembly-language
> libraries. Matlisp is an open-source example that brings LAPACK and
> BLAS into the Lisp domain. Lisp macros are an excellent way to smooth
> over the rough edges of foreign libraries, and deal with some necessary
> translations at compile-time rather than run-time.

Good point. FFIs are a real pain in most of the other languages that I know.
Static type checking makes writing bindings substantially more complicated.
Many bindings (e.g. for OCaml) are unsafe as a consequence, in which case
you've undermined one of the main reasons for using ML.

>> So now I'd like to know how Lisp is more expressive than the
>> alternatives. What practical problems are easier to write in Lisp than in
>> other languages? Macros seem to be a strong point here. Lisp's macros are
>> more powerful than OCaml's and Lisp programs are likely to be faster than
>> Mathematica programs. So there is one niche. Are there others?
> 
> Lisp macros are not a "niche." They are a technique for writing
> computer programs that write computer programs for you. This is a
> marvelous technique which provides the Lisp programmer with tremendous
> leverage to develop "Lispy" abstractions that address a specific
> problem domain while maintaining the general flexibility and power that
> the Lisp language usually provides. To give one description that might
> make some sense to you, it is a Turing-complete macro system, much like
> the C++ template system, but based on the full expressive power of the
> Lisp language, and not just a Turing-complete special-purpose syntax.

Yes. I did quite a bit of work on template metaprogramming in C++ but I
decided it was futile in the end and gave up. There were too many bugs in
g++ (which kept segfaulting whilst compiling or generating incorrect code)
and it took so long to compile that it just wasn't worth the effort. I
guess Lisp macros are vastly quicker to compile, and running time isn't too
important for a macro.

I've used metaprogramming in MetaOCaml and Mathematica as well. MetaOCaml is
a very interesting development but doesn't yet have a native code gen.
Mathematica is hugely powerful in this respect because its built-in
simplification "macros" can be used to optimise intermediate
representations of programs. I'd say that metaprogramming has been most
useful to me in Mathematica but I still wouldn't use it very often.

> Lisp macros have been used in the past to develop multiple full-fledged
> object oriented extensions to the basic Lisp language, for instance,
> without having to change the Lisp language syntax in the slightest.
> That's one reason Lisp people spend very little time worrying about
> syntax.

Yes. This might be a good justification for writing interpreters and
compilers in Lisp. I've studied Sussman and Abelson's lectures on this,
which were quite interesting. However, I'd be concerned about predicting
the efficiency of the resulting code.

>> > Lisp programmers are quite satisfied with the syntax they have. Some of
>> > the smartest programmers in the world have had 50 years to improve it,
>> > and they've left it pretty much as it is. Do we really need to offer
>> > more defense than that to every know-nothing complainer who wanders by?
>>
>> We already addressed that point.
> 
> And you seem to have missed it.

What do you mean?

>> [clipped from above]
>>
>> I also find it interesting to see the kinds of responses that questions
>> get in forums. The c.l.java.programming crowd were mostly unhelpful. The
>> c.l.functional crowd were mostly helpful. This newsgroup is partway
>> between the two. That is useful to know if people are considering trying
>> to learn and use Lisp.
> 
> Most Lisp programmers are used to the idea that many programmers are
> unfamiliar with Lisp, or had a brief encounter marked by poor pedagogy
> (based on a 1960's view of the Lisp language), and are willing to go to
> some lengths to address questions posed out of honest misconceptions.
> However, a belligerent belief by some novice that he can come in and
> discuss with us the "weaknesses" of the Lisp language on an equal basis
> to a Lisp expert naturally rubs us the wrong way.

I'm more interested in the strengths of Lisp than its weaknesses. It is
interesting to hear other people discuss infix vs prefix but I'd much
rather know why macros are useful.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124480767.298370.297210@g47g2000cwa.googlegroups.com>
Jon Harrop wrote:
> >>
> >> I also find it interesting to see the kinds of responses that questions
> >> get in forums. The c.l.java.programming crowd were mostly unhelpful. The
> >> c.l.functional crowd were mostly helpful. This newsgroup is partway
> >> between the two. That is useful to know if people are considering trying
> >> to learn and use Lisp.
> >
> > Most Lisp programmers are used to the idea that many programmers are
> > unfamiliar with Lisp, or had a brief encounter marked by poor pedagogy
> > (based on a 1960's view of the Lisp language), and are willing to go to
> > some lengths to address questions posed out of honest misconceptions.
> > However, a belligerent belief by some novice that he can come in and
> > discuss with us the "weaknesses" of the Lisp language on an equal basis
> > to a Lisp expert naturally rubs us the wrong way.
>
> I'm more interested in the strengths of Lisp than its weaknesses. It is
> interesting to hear other people discuss infix vs prefix but I'd much
> rather know why macros are useful.

One thing no-one else has mentioned is core files.

In Common Lisps you often work by opening up the lisp command line and
typing stuff in, or cutting it out of an editor.  Then you run the
functions, possibly compiling them as you go.

If you want to do something with what you've done later then you can
belch out the whole system state to an executable file.  Later you can
execute it and keep going.  (You have to keep the source to of-course,
or there's no point.)

Your finished programs can even do this.  That is, if a lot of data
ends up being stored in a program you can write a feature to save a
core file.  The user can then start up from where they left off using
the core file.  The programmer has to do little extra work to support
this.  The disadvantage is that core files are rather large (~6MB for
GCL for example).  (Saving specific data with "print" is often more
efficient).

Lastly, debugging can be made quite simple in lisp.  You bury a
read-eval-print loop somewhere in the program.  If it goes wrong you
enter it and look at the variables or debug it a bit.  Then, from
inside the program you can write new functions to replace the incorrect
ones.  To do this you need to put the whole lisp implementation into
the binary of your program, which isn't hard, but wasteful of disc
space if the program is only small.
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430641b1$0$17507$ed2e19e4@ptn-nntp-reader04.plus.net>
Rob Thorpe wrote:
> If you want to do something with what you've done later then you can
> belch out the whole system state to an executable file.  Later you can
> execute it and keep going.  (You have to keep the source to of-course,
> or there's no point.)

Interesting. The Mathematica equivalent is "Save". The OCaml top-level has
no equivalent, AFAIK (there may be one).

> Your finished programs can even do this.  That is, if a lot of data
> ends up being stored in a program you can write a feature to save a
> core file.  The user can then start up from where they left of using
> the core file.  The programmer has to do little extra work to support
> this.  The disadvantage is that core files are rather large (~6MB for
> GCL for example).  (Saving specific data with "print" is often more
> efficient).

Yes, OCaml has type unsafe marshalling by comparison. A lot of people seem
to miss dynamically typed printing but I don't really mind. Not sure why
not. :-)

> Lastly, debugging can be made quite simple in lisp.  You bury a
> read-eval-print loop somewhere in the program.  If it goes wrong you
> enter it and look at the variables or debug it a bit.  Then, from
> inside the program you can write new functions to replace the incorrect
> ones.  To do this you need to put the whole lisp implementation into
> the binary of your program, which isn't hard, but wasteful of disc
> space if the program is only small.

Yes. That is easy in Mathematica but to do it in OCaml requires you to
redefine everything that depended on that function. So are top-levels used
a lot more by Lispers than MLers?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: mikel
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <0eANe.23114$gl4.14883942@news.sisna.com>
Jon Harrop wrote:
> Rob Thorpe wrote:
> 
>>If you want to do something with what you've done later then you can
>>belch out the whole system state to an executable file.  Later you can
>>execute it and keep going.  (You have to keep the source to of-course,
>>or there's no point.)
> 
> 
> Interesting. The Mathematica equivalent is "Save". The OCaml top-level has
> no equivalent, AFAIK (there may be one).
> 
> 
>>Your finished programs can even do this.  That is, if a lot of data
>>ends up being stored in a program you can write a feature to save a
>>core file.  The user can then start up from where they left of using
>>the core file.  The programmer has to do little extra work to support
>>this.  The disadvantage is that core files are rather large (~6MB for
>>GCL for example).  (Saving specific data with "print" is often more
>>efficient).
> 
> 
> Yes, OCaml has type unsafe marshalling by comparison. A lot of people seem
> to miss dynamically typed printing but I don't really mind. Not sure why
> not. :-)
> 
> 
>>Lastly, debugging can be made quite simple in lisp.  You bury a
>>read-eval-print loop somewhere in the program.  If it goes wrong you
>>enter it and look at the variables or debug it a bit.  Then, from
>>inside the program you can write new functions to replace the incorrect
>>ones.  To do this you need to put the whole lisp implementation into
>>the binary of your program, which isn't hard, but wasteful of disc
>>space if the program is only small.
> 
> 
> Yes. That is easy in Mathematica but to do it in OCaml requires you to
> redefine everything that depended on that function. So are top-levels used
> a lot more by Lispers than MLers?

 From experience with both, I would say yes. Lisp programmers used the 
toplevel (or REPL, or listener) constantly. ML programmers, not so much.
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124703540.298430.114190@g49g2000cwa.googlegroups.com>
Jon Harrop wrote:
> Rob Thorpe wrote:
> > If you want to do something with what you've done later then you can
> > belch out the whole system state to an executable file.  Later you can
> > execute it and keep going.  (You have to keep the source to of-course,
> > or there's no point.)
>
> Interesting. The Mathematica equivalent is "Save". The OCaml top-level has
> no equivalent, AFAIK (there may be one).
>
> > Your finished programs can even do this.  That is, if a lot of data
> > ends up being stored in a program you can write a feature to save a
> > core file.  The user can then start up from where they left of using
> > the core file.  The programmer has to do little extra work to support
> > this.  The disadvantage is that core files are rather large (~6MB for
> > GCL for example).  (Saving specific data with "print" is often more
> > efficient).
>
> Yes, OCaml has type unsafe marshalling by comparison. A lot of people seem
> to miss dynamically typed printing but I don't really mind. Not sure why
> not. :-)

It depends on what you're doing.  If you have to store lots of data that's
quite ordered and fixed, then it's simple.  If you have to store more
complex data like trees it can be useful to have both the possibility of
printing it with print, or dumping the running program.

> > Lastly, debugging can be made quite simple in lisp.  You bury a
> > read-eval-print loop somewhere in the program.  If it goes wrong you
> > enter it and look at the variables or debug it a bit.  Then, from
> > inside the program you can write new functions to replace the incorrect
> > ones.  To do this you need to put the whole lisp implementation into
> > the binary of your program, which isn't hard, but wasteful of disc
> > space if the program is only small.
>
> Yes. That is easy in Mathematica but to do it in OCaml requires you to
> redefine everything that depended on that function. So are top-levels used
> a lot more by Lispers than MLers?

Yes, lispers love this kind of thing.  A lisp implementation will
generally indirect every non-core function call through a table.  This
firstly slows everything down!  It also means that changing a function
definition changes the effect of calling that function everywhere in
the program.

Changing things directly is easier than messing around with linking
whenever you want to test the effect of changing something small.  It's
a bit heavy on the CPU though.
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4309a43f$0$17485$ed2e19e4@ptn-nntp-reader04.plus.net>
Rob Thorpe wrote:
> Jon Harrop wrote:
>> Yes, OCaml has type unsafe marshalling by comparison. A lot of people
>> seem to miss dynamically typed printing but I don't really mind. Not sure
>> why not. :-)
> 
> It depends on what your doing.  If you have to store lots of data thats
> quite ordered and fixed, then it's simple.  If you have to store more
> complex data like trees it can be useful to have both the possiblity of
> printing it with print, or dumping the running program.

Yes. I don't think there is a theoretical reason why OCaml couldn't have a:

# define_string_of string_of_ast ast

macro that generated a function to print a given, monomorphic type. That
would be pretty easy to do, in fact...

I tend to just write string_of_* functions for everything. I've got a
problem now because I'd like them to abbreviate their output.

For example, in OCaml you can define an infinite (cyclic) list:

# let rec a = 1 :: a;;
val a : int list =
  [1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1;
1;
   1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1;
1;
   1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1;
1;
   1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1;
1;
   1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1;
1;
   1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1;
1;
   1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1;
1;
   1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1;
1;
   1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1;
1;
   1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1;
1;
   1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1;
1;
   1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1;
   ...]

So the top-level abbreviates it (fortunately!). Can you do cyclic data
structures like this in Lisp and, if so, how do top-levels print them?

>> Yes. That is easy in Mathematica but to do it in OCaml requires you to
>> redefine everything that depended on that function. So are top-levels
>> used a lot more by Lispers than MLers?
> 
> Yes, lispers love this kind of thing.  A lisp implementation will
> generally indirect every non-core function call through a table.  This
> firstly slows everything down!  It also means that changing a function
> definition changes the effect of calling that function everywhere in
> the program.

Right.

> Changing things directly is easier than messing around with linking
> whenever you want to test the effect of changing something small.  It's
> a bit heavy on the CPU though.

Recompilation typically takes <1s with OCaml, even for big projects, so it
isn't that bad. Whole program compilers like Stalin and MLton take a lot
longer, of course, so they'd really benefit from something like that.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124725275.420095.48730@g14g2000cwa.googlegroups.com>
Jon Harrop wrote:
> Rob Thorpe wrote:
> > Jon Harrop wrote:
> >> Yes, OCaml has type unsafe marshalling by comparison. A lot of people
> >> seem to miss dynamically typed printing but I don't really mind. Not sure
> >> why not. :-)
> >
> > It depends on what your doing.  If you have to store lots of data thats
> > quite ordered and fixed, then it's simple.  If you have to store more
> > complex data like trees it can be useful to have both the possiblity of
> > printing it with print, or dumping the running program.
>
> Yes. I don't think there is a theoretical reason why OCaml could have a:
>
> # define_string_of string_of_ast ast
>
> macro that generated a function to print a given, monomorphic type. That
> would be pretty easy to do, in fact...
>
> I tend to just write string_of_* functions for everything. I've got a
> problem now because I'd like them to abbreviate their output.
>
> For example, in OCaml you can define an infinite (cyclic) list:
>
> # let rec a = 1 :: a;;
> val a : int list =

<snip>

> So the top-level abbreviates it (fortunately!). Can you do cyclic data
> structures like this in Lisp and, if so, how do top-levels print them?

Yes, as Pascal shows below the printer has a syntax for printing out
circular lists.  You can switch circular-list detection in the printer
on and off with *PRINT-CIRCLE*.  They can even be read by the reader.

It isn't particularly good form to use circular lists like this in lisp
though.

> >> Yes. That is easy in Mathematica but to do it in OCaml requires you to
> >> redefine everything that depended on that function. So are top-levels
> >> used a lot more by Lispers than MLers?
> >
> > Yes, lispers love this kind of thing.  A lisp implementation will
> > generally indirect every non-core function call through a table.  This
> > firstly slows everything down!  It also means that changing a function
> > definition changes the effect of calling that function everywhere in
> > the program.
>
> Right.
>
> > Changing things directly is easier than messing around with linking
> > whenever you want to test the effect of changing something small.  It's
> > a bit heavy on the CPU though.
>
> Recompilation typically takes <1s with OCaml, even for big projects, so it
> isn't that bad. Whole program compilers like Stalin and MLton take a lot
> longer, of course, so they'd really benefit from something like that.

Yes, but maybe in some ways it's contrary to their aims.  The reason
Stalin can create fast code is because it can enclose the world the
program lives in.  By doing this it can eliminate many indirections
that would otherwise be needed in Scheme.  I don't know about MLton
though.
From: Pascal Bourguignon
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <87d5o57uq7.fsf@thalassa.informatimago.com>
"Rob Thorpe" <·············@antenova.com> writes:
>> # let rec a = 1 :: a;;
>> val a : int list =
>
> <snip>
>
>> So the top-level abbreviates it (fortunately!). Can you do cyclic data
>> structures like this in Lisp and, if so, how do top-levels print them?
>
> Yes, as Pascal shows below the printer has a syntax for printing out
> circular lists.  You can switch on and off circular-list detection in
> the printer with.  They can even be read by the reader.

By the way, I'm not sure OCaml really can print circular _structures_.
Perhaps it only stopped printing the list because it was too big.
Lisp pretty printer can do this too (see *PRINT-LEVEL* and
*PRINT-LENGTH*):

[16]> (let ((*print-length* 4))
        (print (list 1 2 3 4 5 6 7 8 9 0)))

(1 2 3 4 ...) 
(1 2 3 4 5 6 7 8 9 0)


[17]> (let ((*print-level* 4))
        (print '#1=(#1# . rest)) 
        (values))

((((# . REST) . REST) . REST) . REST) 
    ^
    |
    +---- this is a "..." vertical ;-)


In CL, #n= and #n# can be used to write and print any kind of
recursive structures, not only circular lists.

[34]> (let ((infinite-tree '(#1=(left #1# . #2=(right #1# . #2#)) . #2#))
            (*print-circle* t))
        (print infinite-tree)
        (values))

(#1=(LEFT #1# . #2=(RIGHT #1# . #2#)) . #2#))


> It isn't particularly good form to use circular lists like this in lisp
> though.

Cantor would not agree...


-- 
"Specifications are for the weak and timid!"
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430a0be0$0$97131$ed2619ec@ptn-nntp-reader03.plus.net>
Pascal Bourguignon wrote:
> By the way, I'm not sure OCaml really can print circular _structures_.
> Perhaps it only stopped printing the list because it was too big.

If I'm understanding you correctly, yes, OCaml cannot "print circular
structures", i.e. I don't believe the top-level pretty printer detects
cyclicity.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Pascal Bourguignon
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <87wtme6wj8.fsf@thalassa.informatimago.com>
Jon Harrop <······@jdh30.plus.com> writes:
> For example, in OCaml you can define an infinite (cyclic) list:
>
> # let rec a = 1 :: a;;
> val a : int list =
>   [1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1;
>    1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1; 1;
>    ...]

[6]> (let ((a '#1=(1 . #1#)))  a)
#1=(1 . #1#)

-- 
__Pascal Bourguignon__                     http://www.informatimago.com/

This is a signature virus.  Add me to your signature and help me to live
From: lin8080
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430B107B.E122757E@freenet.de>
Pascal Bourguignon schrieb:

> [6]> (let ((a '#1=(1 . #1#)))  a) #1=(1 . #1#)

Oh dear. This runs on my win98, and runs and runs ...
After 20min I do a reboot and continue reading c.l.l :)

stefan
From: Pascal Bourguignon
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <871x4k6079.fsf@thalassa.informatimago.com>
lin8080 <·······@freenet.de> writes:

> Pascal Bourguignon schrieb:
>
>> [6]> (let ((a '#1=(1 . #1#)))  a) #1=(1 . #1#)
>
> Oh dear. This runs on my win98, and runs and runs ...
> After 20min I do a reboot and continue reading c.l.l :)

You missed a:

     (setf *print-circle* t)

before.

-- 
__Pascal Bourguignon__                     http://www.informatimago.com/

Nobody can fix the economy.  Nobody can be trusted with their finger
on the button.  Nobody's perfect.  VOTE FOR NOBODY.
From: lin8080
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430CB6A7.93172A9C@freenet.de>
Pascal Bourguignon schrieb:
> lin8080 <·······@freenet.de> writes:
> > Pascal Bourguignon schrieb:

> >> [6]> (let ((a '#1=(1 . #1#)))  a) #1=(1 . #1#)

> > Oh dear. This runs on my win98, and runs and runs ...
> > After 20min I do a reboot and continue reading c.l.l :)
> 
> You missed a:
> 
>      (setf *print-circle* t)
> 
> before.


[1]> (setf *print-circle* t)
T
[2]> (let ((a '#1=(1 . #1#))) a) #1=(1 . #1#)
                                 ------------ jep
#1=(1 . #1#)
[3]>
*** - EVAL: 1 is not a function name; try using a symbol instead
The following restarts are available:
USE-VALUE      :R1      You may input a value to be used instead.
ABORT          :R2      ABORT
Break 1 [4]>


why do I know so little about that beauty....


btw, maybe when someone is interested in fibonacci:
I'm guessing around, in which number-system (de: zahlensystem) is it
possible to get the numbers of fibonacci as pure integers.
(/ 89 55)   = 1.518 181818
(* 87 1.51818)  --> should be integer value
(/ 55 1.51818)  --> should be integer value
... --> should all be integer values
I try some with (setf *print-base* xx), but could not find anything.
Also a pbase-function example from a book does not work.
(pbase 4 2) --> "1000"
From: Nathan Baum
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <deikbb$tcs$1@newsg1.svr.pol.co.uk>
lin8080 wrote:
> 
> [1]> (setf *print-circle* t)
> T
> [2]> (let ((a '#1=(1 . #1#))) a) #1=(1 . #1#)
>                                  ------------ jep
> #1=(1 . #1#)
> [3]>
> *** - EVAL: 1 is not a function name; try using a symbol instead
> The following restarts are available:
> USE-VALUE      :R1      You may input a value to be used instead.
> ABORT          :R2      ABORT
> Break 1 [4]>
> 
> 
> why do I konw so less about that beauty....
> 

The second "#1=(1 . #1#)" isn't input, it's the output. You broke it 
when you quoted Pascal's original post. It was:

  [6]> (let ((a '#1=(1 . #1#)))  a)
  #1=(1 . #1#)
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4q9lbzli.fsf@ccs.neu.edu>
Jon Harrop <······@jdh30.plus.com> writes:

> I'm more interested in the strengths of Lisp than its weaknesses. It is
> interesting to hear other people discuss infix vs prefix but I'd much
> rather know why macros are useful.

Functions provide a means of semantically extending the language.
Data structures provide a means of ontologically extending the language.

Macros provide a means of syntactically extending the language.

Consider these examples:  I often use Scheme, but I miss a lot of the
features of Common Lisp.  I have a (somewhat hairy) macro that extends
Scheme with Common Lisp lambda lists (with &optional, &rest, &aux,
&key --- the whole nine yards).

On the other hand, Scheme has this nifty extension to COND.  You can
put the `=>' token between a predicate and the consequence in a clause
and this arranges for the consequence (which should then be LAMBDA
expression or function name) to be invoked on the predicate.

   (cond ((foo x y) => (lambda (z) (+ z 3)))
         (t 22))

I have a macro for Common Lisp that extends it in the same way.


Now suppose I want these facilities in Java...

~jrm
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3pss9948a.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> ··············@hotmail.com wrote:

> > Your responses show that you do not understand the concept of Lisp
> > macros. Your mention of "AST" (abstract syntax trees) and comparisons
> > to Mathematica, for instance, are not on point.
> 
> Several people have said this but none have explained why.

Of course we have.  You just haven't paid any attention to the
answers.  Little wonder you still don't have a clue.


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4306228e$0$1300$ed2619ec@ptn-nntp-reader02.plus.net>
jayessay wrote:
> Jon Harrop <······@jdh30.plus.com> writes:
>> ··············@hotmail.com wrote:
>> > Your responses show that you do not understand the concept of Lisp
>> > macros. Your mention of "AST" (abstract syntax trees) and comparisons
>> > to Mathematica, for instance, are not on point.
>> 
>> Several people have said this but none have explained why.
> 
> Of course we have.  You just haven't paid any attention to the
> answers.  Little wonder you still don't have a clue.

Other people are now agreeing that Mathematica has similar capabilities.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: jayessay
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m3d5o991e3.fsf@rigel.goldenthreadtech.com>
Jon Harrop <······@jdh30.plus.com> writes:

> jayessay wrote:
> > Jon Harrop <······@jdh30.plus.com> writes:
> >> ··············@hotmail.com wrote:
> >> > Your responses show that you do not understand the concept of Lisp
> >> > macros. Your mention of "AST" (abstract syntax trees) and comparisons
> >> > to Mathematica, for instance, are not on point.
> >> 
> >> Several people have said this but none have explained why.
> > 
> > Of course we have.  You just haven't paid any attention to the
> > answers.  Little wonder you still don't have a clue.
> 
> Other people are now agreeing that Mathematica has similar capabilities.

What's that have to do with it?  Also, wrt Mathematica they are only
talking about the specific use case of "AST"s and "code generation".


/Jon

-- 
'j' - a n t h o n y at romeo/charley/november com
From: Jamie Border
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <de4v4t$hd0$1@nwrdmz01.dmz.ncs.ea.ibs-infra.bt.com>
> ··············@hotmail.com wrote:
>> I don't understand what you are trying to achieve by your chattering on
>> comp.lang.lisp.
>
JH> I am trying to find out what Lisp programmers think of the alternatives 
and
JH> what motivates them to continue using Lisp.

Because we hate ourselves and we want to die a slow, brain-damaging death 
from infix withdrawal.

[snip]

> If you believe I have said something wrong then I would appreciate it if 
> you
> could point out what, specifically.

Yes, you are knocking CL in c.l.l without being able to back up what you are 
saying, and without making your motivation clear.

[snip]

> I'm trying to decide if it is worth learning Lisp so I am more interested 
> in
> learning why Lisp programmers chose Lisp and seeing how a seasoned Lisp
> programmer can translate my ray tracer, for example. Why do you use Lisp?

I understand your reluctance.  I was reluctant for about 2 years.  Then I 
spent a few weeks fooling around with CLISP, SBCL and ACL.  Now I get paid 
to spend ~80% of my time writing CL code.

Seriously, Jon, you sound smart enough to be able to grasp the cool aspects 
of CL, and pragmatic enough to ignore the bits you don't like.

Start solving some real-world problems in CL.  After a while you will either 
"see the light", or get so pissed off with it that you leave it alone for 
good.

Jamie

> -- 
> Dr Jon D Harrop, Flying Frog Consultancy
> http://www.ffconsultancy.com 
From: Julian Squires
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <cqoNe.77234$Ph4.2421167@ursa-nb00s0.nbnet.nb.ca>
On 2005-08-19, Jon Harrop <······@jdh30.plus.com> wrote:
> I'm trying to decide if it is worth learning Lisp so I am more interested in
> learning why Lisp programmers chose Lisp and seeing how a seasoned Lisp
> programmer can translate my ray tracer, for example. Why do you use Lisp?

You might be interested in why I switched away from OCaml, to using CL
almost exclusively.  I think it's pretty common to assume that Lispers
haven't yet basked in the glory that is the modern functional
programming language; I know I was guilty of the same line of thought.

A few years ago, I was a huge fan of various modern languages,
especially OCaml.  I believed Hindley-Milner type inference and static
typing would solve all the world's problems, and I couldn't imagine
someone thinking otherwise.  Plus, OCaml created such /fast/ code!  Very
thrilling.  At work, I had been using OCaml and Haskell heavily for
anything I could get away with, and I had a number of hobbyist projects
in those languages on the go.

I had played with Lisp in various forms a number of times before, but I
never really got it.  However, this time, I was working on a networked
simulation where I wanted to be able to interactively hotpatch the
system.  After being disappointed with the possible approaches in ML,
and still having been too scared of the dynamic type monster to try
Erlang (which is a lovely language, I have since discovered), I decided
to give CL a try; I had read, after all, that one could declare types in
advance and that some compilers supported type inference.

While working on this system, I suddenly "got" macros.  There were many
things I liked about CL (numbers, conditions, what little bits of CLOS I
understood), but I had never before understood why people were so
fanatical about a dead, bloated language.

Now, I don't know what's happened with macro-alikes in those other
languages since I stopped following their progress, but CL is so good I
almost don't care.  I realized that StaticTypingRepelsElephants[1], and
mature development environments trump freshly reinvented wheels any day.
The speed difference between OCaml and CL is basically illusory -- the
free compilers keep getting better, and you always have the option of
buying one of several really great, really fast commercial compilers.

It's important not to forget that we shouldn't really be treating this
debate as a battle between languages -- though I prefer CL, if ML had
the commercial mindshare of Java, I would already be quite happy.  And,
I still like Haskell occasionally as a sort of DSL for expressing
certain mathematical problems.

> Obviously I cannot do a good enough job of the conversion myself to make for
> a fair comparison with the other languages (that I am much more familiar
> with). It is also interesting to study the differences between the 8 Lisp
> and 13 Scheme translations that have been done.

It's also interesting to note that while some skilled Lispers have put
their time into making your optimized translations, most of us really
don't care.  We have nothing to prove, and other code to write.

One of the reasons people on this group are calling your "objective
tests" into question is that CL's concision really only begins to shine
at (in my experience) a minimum of 500 lines of code or so.  After which
point it is so beautiful, so supportive of OAOO, that you can never
really go back to anything less.

Cheers.

[1] -- http://www.c2.com/cgi/wiki?StaticTypingRepelsElephants

-- 
Julian Squires
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4307e33e$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
This is very interesting, thanks. :-)

Julian Squires wrote:
> You might be interested in why I switched away from OCaml, to using CL
> almost exclusively.  I think it's pretty common to assume that Lispers
> haven't yet basked in the glory that is the modern functional
> programming language; I know I was guilty of the same line of thought.

Yes, I had heard that. I'm trying to keep an open mind. ;-)

> I had played with Lisp in various forms a number of times before, but I
> never really got it.  However, this time, I was working on a networked
> simulation where I wanted to be able to interactively hotpatch the
> system.

Ok. That sounds like an ideal application for Lisp and quite similar to
something I might be embarking on.

> ... I realized that StaticTypingRepelsElephants[1] ...,

Hmm, ditching static type checking certainly looks like a big step in the
wrong direction to me but maybe I have yet to see the light. :-)

> and  
> mature development environments trump freshly reinvented wheels any day.
> The speed difference between OCaml and CL is basically illusory -- the
> free compilers keep getting better, and you always have the option of
> buying one of several really great, really fast commercial compilers.

True.

> It's important not to forget that we shouldn't really be treating this
> debate as a battle between languages -- though I prefer CL, if ML had
> the commercial mindshare of Java, I would already be quite happy.  And,
> I still like Haskell occasionally as a sort of DSL for expressing
> certain mathematical problems.

Yes. I'm just interested in learning which languages are best for which
tasks.

> It's also interesting to note that while some skilled Lispers have put
> their time into making your optimized translations, most of us really
> don't care.  We have nothing to prove, and other code to write.

Sure.

> One of the reasons people on this group are calling your "objective
> tests" into question is that CL's concision really only begins to shine
> at (in my experience) a minimum of 500 lines of code or so.  After which
> point it is so beautiful, so supportive of OAOO, that you can never
> really go back to anything less.

Ok, so all the programs I'm comparing are <200LOC. Good point. It would be
hard to find data for much longer programs but I can imagine that Lisp's
macros give you extra factoring capability over OCaml's as its HOFs do over
C. Do you think that Lisp is more concise than OCaml, say?

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Julian Squires
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <jWRNe.77796$Ph4.2441858@ursa-nb00s0.nbnet.nb.ca>
On 2005-08-21, Jon Harrop <······@jdh30.plus.com> wrote:
> Julian Squires wrote:
>> I had played with Lisp in various forms a number of times before, but I
>> never really got it.  However, this time, I was working on a networked
>> simulation where I wanted to be able to interactively hotpatch the
>> system.
>
> Ok. That sounds like an ideal application for Lisp and quite similar to
> something I might be embarking on.

In an earlier post, you asked whether lispers tended to use the repl
more than mlers, or why, or similar.  It's certainly related to the ease
of dynamically changing the system.  When I do OCaml stuff, I still only
use the interactive toplevel for little tests; never the kind of
exploratory development I do in CL.  (Lots of things in CL are nicely
structured around this style of development -- docstrings, and
interactive restarts, for example.)

>> ... I realized that StaticTypingRepelsElephants[1] ...,
>
> Hmm, ditching static type checking certainly looks like a big step in the
> wrong direction to me but maybe I have yet to see the light. :-)

Well, what we can probably agree on is that static typing is less about
preventing certain kinds of bugs, and more about providing a useful
framework around which programs can be structured (so-called typeful
programming).  The small number of errors it tends to catch are
certainly nice, but they're not really significant.

I find that the same typeful programming style (and pattern matching,
and whatnot) is possible in CL, so when I need it (say, when translating
over some ML code), I use it.

>> It's important not to forget that we shouldn't really be treating this
>> debate as a battle between languages -- though I prefer CL, if ML had
>> the commercial mindshare of Java, I would already be quite happy.  And,
>> I still like Haskell occasionally as a sort of DSL for expressing
>> certain mathematical problems.
>
> Yes. I'm just interested in learning which languages are best for which
> tasks.

Agreed.  And OCaml is a great language for many tasks, I do agree.
It's certainly proven its capabilities in programming competitions.

>> It's also interesting to note that while some skilled Lispers have put
>> their time into making your optimized translations, most of us really
>> don't care.  We have nothing to prove, and other code to write.
>
> Sure.
>
>> One of the reasons people on this group are calling your "objective
>> tests" into question is that CL's concision really only begins to shine
>> at (in my experience) a minimum of 500 lines of code or so.  After which
>> point it is so beautiful, so supportive of OAOO, that you can never
>> really go back to anything less.
>
> Ok, so all the programs I'm comparing are <200LOC. Good point. It would be
> hard to find data for much longer programs but I can imagine that Lisp's
> macros give you extra factoring capability over OCaml's as its HOFs do over
> C. Do you think that Lisp is more concise than OCaml, say?

I realize I'm duplicating a bit of what I said in the other thread of
this thread, but it's nice to expand on, anyway.  In a local sense,
OCaml is a more concise language -- a literal translation of a slab of
OCaml into Lisp (without attempts to lispify it) will likely result in
more verbose code.  However, macros do indeed open avenues of factoring
similar to (some would say greater than) higher-order functions in their
ability to reduce code size.  A literal translation of a slab of Lisp
into OCaml is often impossible.

Cheers.

-- 
Julian Squires
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4307fa75$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Julian Squires wrote:
> In an earlier post, you asked whether lispers tended to use the repl
> more than mlers, or why, or similar.  It's certainly related to the ease
> of dynamically changing the system.  When I do OCaml stuff, I still only
> use the interactive toplevel for little tests; never the kind of
> exploratory development I do in CL.  (Lots of things in CL are nicely
> structured around this style of development -- docstrings, and
> interactive restarts, for example.)

I see.

> Well, what we can probably agree on is that static typing is less about
> preventing certain kinds of bugs, and more about providing a useful
> framework around which programs can be structured (so-called typeful
> programming).  The small number of errors it tends to catch are
> certainly nice, but they're not really significant.

I would actually disagree with that. I've found that static type checking
removes an enormous number of bugs. You have to learn a lot before you can
really leverage it but it has let me prove big chunks of otherwise
error-prone code to be safe and has removed a lot of mistakes.

> I find that the same typeful programming style (and pattern matching,
> and whatnot) is possible in CL, so when I need it (say, when translating
> over some ML code), I use it.

I read that wiki on "typeful programming" and got the impression that it
meant phantom types. Is that right? Can you exploit phantom types in CL?

>> Yes. I'm just interested in learning which languages are best for which
>> tasks.
> 
> Agreed.  And OCaml is a great language for many tasks, I do agree.
> It's certainly proven its capabilities in programming competitions.

Mathematica and OCaml fill most of my needs but people keep telling me about
this Lisp language...

> I realize I'm duplicating a bit of what I said in the other thread of
> this thread, but it's nice to expand on, anyway.  In a local sense,
> OCaml is a more concise language -- a literal translation of a slab of
> OCaml into Lisp (without attempts to lispify it) will likely result in
> more verbose code.  However, macros do indeed open avenues of factoring
> similar to (some would say greater than) higher-order functions in their
> ability to reduce code size.  A literal translation of a slab of Lisp
> into OCaml is often impossible.

Apparently I am about to find this out...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mr4ubF17og0pU1@individual.net>
Jon Harrop wrote:
> Mathematica and OCaml fill most of my needs but people keep telling me about
> this Lisp language...

Sorry if this has already been mentioned, but if you'd like a nice 
introduction, read www.gigamonkeys.com/book (Practical Common Lisp). 
Paul Graham's "On Lisp" reportedly has loads of advanced macro stuff in 
it.  It would be interesting to know how a Caml guy would code the 
various examples.

-- 
I believe in Karma.  That means I can do bad things to people
all day long and I assume they deserve it.
	Dogbert
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43086276$0$97099$ed2619ec@ptn-nntp-reader03.plus.net>
Ulrich Hobelmann wrote:
> Sorry if this has already been mentioned, but if you'd like a nice
> introduction, read www.gigamonkeys.com/book (Practical Common Lisp).

I've started reading that (in the middle). It's very comprehensible.

> Paul Graham's "On Lisp" reportedly has loads of advanced macro stuff in
> it. It would be interesting to know how a Caml guy would code the 
> various examples.

Sure, I'll post back as I read it. :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Julian Squires
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <811Oe.77917$Ph4.2447068@ursa-nb00s0.nbnet.nb.ca>
On 2005-08-21, Jon Harrop <······@jdh30.plus.com> wrote:
> Julian Squires wrote:
>> Well, what we can probably agree on is that static typing is less about
>> preventing certain kinds of bugs, and more about providing a useful
>> framework around which programs can be structured (so-called typeful
>> programming).  The small number of errors it tends to catch are
>> certainly nice, but they're not really significant.
>
> I would actually disagree with that. I've found that static type checking
> removes an enormous number of bugs. You have to learn a lot before you can
> really leverage it but it has let me prove big chunks of otherwise
> error-prone code to be safe and has removed a lot of mistakes.

We'll have to agree to disagree on that one.

>> I find that the same typeful programming style (and pattern matching,
>> and whatnot) is possible in CL, so when I need it (say, when translating
>> over some ML code), I use it.
>
> I read that wiki on "typeful programming" and got the impression that it
> meant phantom types. Is that right? Can you exploit phantom types in CL?

I'm not familiar with the term, but from reading online this seems to
mean using the type system to guarantee invariants such as
well-formedness in data.  With typeful programming, I was thinking more
in terms of the way that types and pattern matching shape the way one
writes programs in ML or Haskell, but phantom types definitely apply in
that case, too.  As for getting phantom type behavior in CL, I don't
think it's nearly as complete as in Haskell, but with a good compiler
and certain amount of specified type information, you can get similar
assurances of type-related invariants/well-formedness.

You might be interested in reading the CMUCL manual on type inference:
<http://common-lisp.net/project/cmucl/doc/cmu-user/compiler-hint.html#toc152>

Cheers.

-- 
Julian Squires
From: Vesa Karvonen
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <df5fgq$hjq$1@oravannahka.helsinki.fi>
Jon Harrop <······@jdh30.plus.com> wrote:
> Julian Squires wrote:
[...]
> > [...] However, macros do indeed open avenues of factoring similar to
> > (some would say greater than) higher-order functions in their ability
> > to reduce code size. A literal translation of a slab of Lisp into
> > OCaml is often impossible.

> Apparently I am about to find this out...

If you are reading On Lisp, then you should soon have some idea of what
you can do with Lisp (and Scheme) macros. Macros essentially allow you to
extend the syntax of the language. In particular, they allow you to
implement new binding forms, which is strictly impossible to do with
higher-order functions. Macros also allow you to perform arbitrary
computations in compile-time. In some cases you can get similar effects
with an optimizing compiler and very careful staging of computations, but
the compile-time evaluation of expressions is usually not guaranteed by
the language semantics.

As a non-trivial, but not extremely complex, example of what you can do
with macros, I'd like to point you to a paper on implementing Lex and Yacc
in Scheme:

  http://library.readscheme.org/servlets/search.ss?kwd=lexer+parser&en=Enter .

I'd like to highlight the fact the parser languages both define new
binding forms and also perform significant computations in compile-time.
Doing the *equivalent* in Ocaml requires using Caml P4 and it is much
harder. Writing parsing combinators that offer similar performance to Yacc
is non-trivial (typical (monadic) combinators are very inefficient).

I think that Julian's comment is quite defendable; it is usually easy to
make a fairly literal conversion of arbitrary Ocaml code into Lisp/Scheme
(using some auxiliary macros, like macros for pattern matching
  (http://download.plt-scheme.org/doc/299.200/html/mzlib/mzlib-Z-H-24.html#node_chap_24)
and static typing), but it can be next to impossible to achieve a literal
conversion of arbitrary Lisp/Scheme code that makes non-trivial use of
macros into Ocaml.

-Vesa Karvonen

P.S. Please excuse me for posting Scheme links to a Lisp NG. :-)
From: Ulrich Hobelmann
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3nnof6F2av6sU1@individual.net>
Vesa Karvonen wrote:
>   http://library.readscheme.org/servlets/search.ss?kwd=lexer+parser&en=Enter .

Hm, sounds interesting... :)

> I'd like to highlight the fact the parser languages both define new
> binding forms and also perform significant computations in compile-time.
> Doing the *equivalent* in Ocaml requires using Caml P4 and it is much
> harder. Writing parsing combinators that offer similar performance to Yacc
> is non-trivial (typical (monadic) combinators are very inefficient).

That's why for all practical purposes SML has ML-yacc and ML-lex (I 
guess OCaml has something similar), which is a totally different 
language that *compiles* to SML.  I don't know how much of the SML 
templates you write in ML-yacc is checked at all by the ml-yacc 
compiler; probably most is left to SML.

The nice thing about Lisp is that it doesn't take another file of type 
.lex to write your lexer.  If it's just a trivial config-file parser, 
you could put in somewhere into the config-file-processing file.

> P.S. Please excuse me for posting Scheme links to a Lisp NG. :-)

If the above paper uses Scheme macros to build the parser, then it's 
even more interesting (to see how people can get anything written in 
those ;) ).

-- 
My ideal for the future is to develop a filesystem remote interface
(a la Plan 9) and then have it implemented across the Internet as
the standard rather than HTML.  That would be ultimate cool.
	Ken Thompson
From: Wade Humeniuk
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <S%qNe.196515$HI.12739@edtnps84>
Jon Harrop wrote:

> 
> 
> I'm trying to decide if it is worth learning Lisp so I am more interested in
> learning why Lisp programmers chose Lisp and seeing how a seasoned Lisp
> programmer can translate my ray tracer, for example. Why do you use Lisp?
> 
> Obviously I cannot do a good enough job of the conversion myself to make for
> a fair comparison with the other languages (that I am much more familiar
> with). It is also interesting to study the differences between the 8 Lisp
> and 13 Scheme translations that have been done.
> 
> So now I'd like to know how Lisp is more expressive than the alternatives.
> What practical problems are easier to write in Lisp than in other
> languages? Macros seem to be a strong point here. Lisp's macros are more
> powerful than OCaml's and Lisp programs are likely to be faster than
> Mathematica programs. So there is one niche. Are there others?
> 
> 

Well, real world programming can be quite ugly.  Here is an example
of a program in Lisp which is non-trivial (no macros, well not
quite true... capi:define-interface is one such builtin LispWorks
macro, oh ... and defclass and capi:with-geometry, etc etc.).

http://www3.telus.net/public/whumeniu/concentration.lisp

A Windows exe of the program is downloadable from

http://www3.telus.net/public/whumeniu/concentration.zip

If you would write a program with the same functionality in
Ocaml (for comparison) it would be interesting.  This program was written in
response to a c.l.l discussion where someone was making similar
queries about CL (but from a C++ shareware perspective).

Wade
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4307dce4$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Wade Humeniuk wrote:
> Well, real world programming can be quite ugly.  Here is an example
> of a program in Lisp which is non-trivial (no macros, well not
> quite true... capi:define-interface is one such builtin LispWorks
> macro, oh ... and defclass and capi:with-geometry, etc etc.).
> 
> http://www3.telus.net/public/whumeniu/concentration.lisp
> 
> A Windows exe of the program is downloadable from
> 
> http://www3.telus.net/public/whumeniu/concentration.zip

I'll have a look, but if it doesn't use macros then I'd expect the ML
equivalent to be shorter, faster and more robust...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: André Thieme
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <de8plo$a1j$1@ulric.tng.de>
Jon Harrop schrieb:

> 
> I'll have a look, but if it doesn't use macros then I'd expect the ML
> equivalent to be shorter, faster and more robust...

As it doesn't use macros I expect that after you had a look you can 
provide your shorter, faster and more robust solution. The lisp source 
has about 170 LOC (without comments and spaces).

How many LOC do you need in ML to embed prolog (logical programming) 
into it? Probably much less than the 180 of Paul Graham, and the ML code 
would run so much faster ;)


André
-- 
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4307f051$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
André Thieme wrote:
> Jon Harrop schrieb:
>> I'll have a look, but if it doesn't use macros then I'd expect the ML
>> equivalent to be shorter, faster and more robust...
> 
> As it doesn't use macros I expect that after you had a look you can
> provide your shorter, faster and more robust solution. The lisp source
> has about 170 LOC (without comments and spaces).

Yes. The main problem is that I can't run it and I don't know what it
does... :-(

> How many LOC do you need in ML to embed prolog (logical programming)
> into it? Probably much less than the 180 of Paul Graham, and the ML code
> would run so much faster ;)

Yes. There is certainly an activation barrier to implementing DSLs in ML as
you need to write an interpreter (well, if like me you don't know camlp4).
Also, the resulting DSL would not benefit from the optimisations provided
by a Lisp compiler, so then you'd have to implement those.

An excellent point...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Wade Humeniuk
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <A2UNe.209801$tt5.175950@edtnps90>
Jon Harrop wrote:
> André Thieme wrote:
> 
>>Jon Harrop schrieb:
>>
>>>I'll have a look, but if it doesn't use macros then I'd expect the ML
>>>equivalent to be shorter, faster and more robust...
>>
>>As it doesn't use macros I expect that after you had a look you can
>>provide your shorter, faster and more robust solution. The lisp source
>>has about 170 LOC (without comments and spaces).
> 
> 
> Yes. The main problem is that I can't run it and I don't know what it
> does... :-(
> 
> 

Its the children's game Concentration.  It is a short term memory game
where one looks for pairs (or triplets, etc) of matching images.
The images are initially face down.  You turn over two cards
(for the match 2 game), if they do not match they must be flipped
down again.  If they match they remain face up.  Proceed until all
the images are face up.  This computer version is GUI based, mouse
gestures used to turn images face up.

What kind of system are you running? Linux?  If you are I can spend
a little time and make a Linux version.  If it is a Mac I am sure
others here could deliver a Mac version.

If you have the time you can peruse the thread "C++ sucks for games"
in c.l.l .  There is a version of the same game in C++.

Wade
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <430815a4$0$97099$ed2619ec@ptn-nntp-reader03.plus.net>
Wade Humeniuk wrote:
> Its the children's game Concentration.  It is a short term memory game
> where one looks for pairs (or triplets, etc) of matching images.
> The images are initially face down.  You turn over two cards
> (for the match 2 game), if they do not match they must be flipped
> down again.  If they match they remain face up.  Proceed until all
> the images are face up.  This computer version is GUI based, mouse
> gestures used to turn images face up.

Hmm, ok.

> What kind of system are you running? Linux?  If you are I can spend
> a little time and make a Linux version.  If it is a Mac I am sure
> others here could deliver a Mac version.

Linux. I can't afford much time for it so don't put yourself out. But I
would be interested in trying and it would be good for the record...

> If you have the time you can peruse the thread "C++ sucks for games"
> in c.l.l .  There is a version of the same game in C++.

LOL, great name. :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Wade Humeniuk
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <d_YNe.209817$tt5.35234@edtnps90>
Jon Harrop wrote:

> 
>>What kind of system are you running? Linux?  If you are I can spend
>>a little time and make a Linux version.  If it is a Mac I am sure
>>others here could deliver a Mac version.
> 
> 
> Linux. I can't afford much time for it so don't put yourself out. But I
> would be interested in trying and it would be good for the record...
> 

Here is the Linux version.  It took me about 20 minutes to port.  All
issues revolve around font problems.  The Linux version uses the Symbol
font for images (perhaps someone knows how to get the funner Webdings
like font in X).

http://www3.telus.net/public/whumeniu/concentration.tgz

This archive contains the source, the delivery script and a delivered
LWL image (delivery level 5).  Your Linux system will need the Motif
libraries installed (preferably openmotif though I think lesstif should
work).  There may be some issues with Linux (maybe Linux version problems).
I am not really running Linux but Linux Compatibility mode under FreeBSD
(Redhat 8 libraries).

Just for the record part of the original C++/Lisp code comparison was how
quick development time was.  My original version took ~3hrs to write,
the final version (with my son feature testing) was ~8hrs.  Gerry Quinn,
who wrote the C++ version also said it was just a few hours work.

Wade

> 
>>If you have the time you can peruse the thread "C++ sucks for games"
>>in c.l.l .  There is a version of the same game in C++.
> 
> 
> LOL, great name. :-)
> 
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43086237$0$97099$ed2619ec@ptn-nntp-reader03.plus.net>
Wade Humeniuk wrote:
> Here is the Linux version.  It took me about 20 minutes to port.  All
> issues revolve around font problems.  The Linux version uses the Symbol
> font for images (perhaps someone knows how to get the funner Webdings
> like font in X).
> 
> http://www3.telus.net/public/whumeniu/concentration.tgz
> 
> This archive contains the source, the delivery script and a delivered
> LWL image (delivery level 5).  Your Linux system will need the Motif
> libraries installed (preferably openmotif though I think lesstif should
> work).  There may be some issues with Linux (maybe Linux version
> problems). I am not really running Linux but Linux Compatibility mode
> under FreeBSD (Redhat 8 libraries).

Wow, thanks.

> Just for the record part of the original C++/Lisp code comparison was how
> quick development time was.  My original version took ~3hrs to write,
> the final version (with my son feature testing) was ~8hrs.  Gerry Quinn,
> who wrote the C++ version also said it was just a few hours work.

I don't think I can beat 3hrs development time (having never used Tk before)
but I'll give it a go. Can you repost the C++? I couldn't find it in the
~1030-post thread (!).

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Wade Humeniuk
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <KlkOe.152077$wr.124453@clgrps12>
Jon Harrop wrote:

> 
> 
> I don't think I can beat 3hrs development time (having never used Tk before)
> but I'll give it a go. Can you repost the C++? I couldn't find it in the
> ~1030-post thread (!).
> 

Hmm,

I cannot seem to find it either.  I am sure I saw it.  Oh well,
maybe we will have to email Gerry Quinn to find it.

Wade
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124708034.807344.114490@o13g2000cwo.googlegroups.com>
Wade Humeniuk wrote:
> Jon Harrop wrote:
>
> >
> >>What kind of system are you running? Linux?  If you are I can spend
> >>a little time and make a Linux version.  If it is a Mac I am sure
> >>others here could deliver a Mac version.
> >
> >
> > Linux. I can't afford much time for it so don't put yourself out. But I
> > would be interested in trying and it would be good for the record...
> >
>
> Here is the Linux version.  It took me about 20 minutes to port.  All
> issues revolve around font problems.  The Linux version uses the Symbol
> font for images (perhaps someone knows how to get the funner Webdings
> like font in X).
>
> http://www3.telus.net/public/whumeniu/concentration.tgz
>
> This archive contains the source, the delivery script and a delivered
> LWL image (delivery level 5).  Your Linux system will need the Motif
> libraries installed (preferably openmotif though I think lesstif should
> work).  There may be some issues with Linux (maybe Linux version problems).
> I am not really running Linux but Linux Compatibility mode under FreeBSD
> (Redhat 8 libraries).
>
> Just for the record part of the original C++/Lisp code comparison was how
> quick development time was.  My original version took ~3hrs to write,
> the final version (with my son feature testing) was ~8hrs.  Gerry Quinn,
> who wrote the C++ version also said it was just a few hours work.

Although it's quite a nice program, and quite simple, I don't think it
demonstrates many of the features of CL.  I expect it could be done
quite simply in many other languages.
From: Wade Humeniuk
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <KkkOe.152076$wr.116558@clgrps12>
Rob Thorpe wrote:
> Wade Humeniuk wrote:
> 

> 
> 
> Although it's quite a nice program, and quite simple, I don't think it
> demonstrates many of the features of CL.  I expect it could be done
> quite simply in many other languages.
> 

Really??  Show me.  The thing that this CL program really shows, (that
makes it more expressive and terse) is the dynamic nature of Lisp.  In
this case the way its coded to simply allow various _dynamic_ changes
to board size and number of picks.  Other static languages (like C++)
have to resort to implementing dynamic Lisp features to get the same
effects.

In the code the line of code which is snuck in is within the definition
of the board interface

    (games-offered :initform '((4 4 2) (4 5 2) (4 6 2)( 6 6 2) (6 6 3) (6 9 2) (6 9 3) (8 
9 4))
                   :initarg :games-offered :accessor games-offered))


Wade
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124726676.494853.114990@g44g2000cwa.googlegroups.com>
Wade Humeniuk wrote:
> Rob Thorpe wrote:
> > Wade Humeniuk wrote:
> >
>
> >
> >
> > Although it's quite a nice program, and quite simple, I don't think it
> > demonstrates many of the features of CL.  I expect it could be done
> > quite simply in many other languages.
> >
>
> Really??  Show me.  The thing that this CL program really shows, (that
> makes it more expressive and terse) is the dynamic nature of Lisp.  In
> this case the way its coded to simply allow various _dynamic_ changes
> to board size and number of picks.  Other static languages (like C++)
> have to resort to implementing dynamic Lisp features to get the same
> effects.

I wasn't comparing it to languages like C++, but rather to other
languages capable of working in a quite dynamic way, like the MLs.

It would also be quite simple in Python or Perl.
From: Wade Humeniuk
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <SBGOe.212065$tt5.169435@edtnps90>
Rob Thorpe wrote:

> 
> I wasn't comparing it to languages like C++, but rather to other
> languages capable of working in a quite dynamic way, like the MLs.
> 
> It would also be quite simple in Python or Perl.
> 

Well, lets see it.  This 10sec assertion is too easy to make.

Wade
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4309e566$0$97131$ed2619ec@ptn-nntp-reader03.plus.net>
André Thieme wrote:
> Jon Harrop schrieb:
>> I'll have a look, but if it doesn't use macros then I'd expect the ML
>> equivalent to be shorter, faster and more robust...
> 
> As it doesn't use macros I expect that after you had a look you can
> provide your shorter, faster and more robust solution. The lisp source
> has about 170 LOC (without comments and spaces).

I just posted my shorter, faster and more robust OCaml implementation that
also took less time to develop. :-p

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: André Thieme
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <dee20a$o5u$1@ulric.tng.de>
Jon Harrop schrieb:
> André Thieme wrote:
> 
>>Jon Harrop schrieb:
>>
>>>I'll have a look, but if it doesn't use macros then I'd expect the ML
>>>equivalent to be shorter, faster and more robust...
>>
>>As it doesn't use macros I expect that after you had a look you can
>>provide your shorter, faster and more robust solution. The lisp source
>>has about 170 LOC (without comments and spaces).
> 
> 
> I just posted my shorter, faster and more robust OCaml implementation that
> also took less time to develop. :-p

Cool, where did you post it?


André
-- 
From: André Thieme
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <dee2ml$oiv$1@ulric.tng.de>
Found it.


André
-- 
From: Pascal Costanza
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3mpcldF16uj9dU1@individual.net>
Jon Harrop wrote:

> So now I'd like to know how Lisp is more expressive than the alternatives.
> What practical problems are easier to write in Lisp than in other
> languages? Macros seem to be a strong point here. Lisp's macros are more
> powerful than OCaml's and Lisp programs are likely to be faster than
> Mathematica programs. So there is one niche. Are there others?

It seems to me that you're not quite asking the right questions.

Read http://www.paulgraham.com/rootsoflisp.html - this explains the 
basic idea that programs and data are the same.

Then read 
http://library.readscheme.org/servlets/search.ss?kwd=Art+of+the+Interpreter&en=Enter 
- this explains how to make use of that idea in interpreters.

Macros are functions that map s-expressions to s-expressions, and they 
just make use of that idea in compilers. Read a good tutorial on macros. 
Several were suggested, I can recommend Peter Seibel's "Practical Common 
Lisp", Peter Norvig's "Paradigms of Artificial Intelligence 
Programming", and Paul Graham's "On Lisp".

The essence is that Lisp allows you to embed any kind of language, and 
mix Lisp and the various embedded languages in your programs. The 
barrier of creating new embedded languages (or language fragments) is so 
low that programming in a domain-specific language for your problem is 
the norm, without necessarily recognizing this (in contrast to other DSL 
frameworks where you need a heavy-weight top-down approach to specify 
grammars, semantics, etc., of your DSLs).

Lisp's syntax, or rather lack thereof, is very important in this regard 
because it's "paradigm-neutral". The pattern is always that the first 
element of a list determines what the whole expression means. For 
example, you can build parts of your program in CLOS (the OOP layer in 
Common Lisp), but treat those parts as functional abstractions in other 
parts. You can also change your mind about specific design decisions 
later without necessarily affecting all the use sites.

Lispers appreciate that power so much that they disregard any kind of 
advantages that more sophisticated syntaxes could bring because they 
pale in comparison.

See also http://www.lisp.org/table/objects.htm and maybe even 
http://p-cos.net/lisp/guide.html


Pascal

-- 
OOPSLA'05 tutorial on generic functions & the CLOS Metaobject Protocol
++++ see http://p-cos.net/oopsla05-tutorial.html for more details ++++
From: Peter Seibel
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m2mzndq329.fsf@gigamonkeys.com>
Jon Harrop <······@jdh30.plus.com> writes:

> André Thieme wrote:

>> How would you implement an Unit Test Framework in ML in 16 lines (not
>> counting comments and blank lines?) which can be used for some simple
>> programs?
>
> Can you describe what that is?

I don't know if this is what Andre was thinking of, but you can read
about one such test framework at:

 <http://www.gigamonkeys.com/book/practical-building-a-unit-test-framework.html>

Bear in mind that that is Chapter 9 of a book so you may need to skim
some of the earlier bits (also available online--see my .sig) to get
your bearings.

-Peter

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: André Thieme
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <de8rgq$b3o$1@ulric.tng.de>
Jon Harrop schrieb:
> André Thieme wrote:
> 
>>What you did was to create artificially an example which serves your
>>purpose of showing how much better ML can be read.
> 
> The same argument applies to basically all functions that I want to write.

Great, so use ML.


>>What you did not do 
>>was to mention how the pure functional programming style can complicate
>>things.
> 
> 
> If an imperative style is better, then use an imperative style. This can be
> done directly in ML or indirectly using monads in Haskell.

If you want to learn Lisp do it and then compare how easy and natural 
you find it to use imperative style.


>>What if you need to pass an argument through 12 functions 
>>because you can't save it anywhere? Is ML then still easier to parse?
> 
> 
> let f a b c d e f g h i j k l =
>   let f1 = ... in
>   let f2 = ... in
> 
> That seems pretty simple to me. If you would resort to an imperative style
> in Lisp simply because it cannot express this so easily then I would say
> that is another bullet in Lisp's syntax. However, my guess is that seasoned
> Lisp programmers would not write this the way you would...

My guess is that a seasoned Lisp programmer would use Lisp, not ML.



>>How would you implement an Unit Test Framework in ML in 16 lines (not
>>counting comments and blank lines?) which can be used for some simple
>>programs?
> 
> 
> Can you describe what that is?

Peter already posted the link, but in the case you missed that posting:
http://www.gigamonkeys.com/book/practical-building-a-unit-test-framework.html



> 
>>How readable are webservers written in ML?
> 
> 
> In OCaml, very readable.  Same goes for DNS, SSH and so forth. Judging by the
> results of the ray tracer, it would be extremely difficult to write an SSH
> server in Lisp which had comparable performance.

OCaml might be nice for some tasks, but it also introduces its own 
problems, like the type inference. To keep a good overview will get 
harder and harder cause it gets difficult to say why a type is not 
correct (when the type is a result of a complicated dependency graph).
One could get around that by explicitly stating the types, but that will 
also get complicated cause you need to change the prototypes on every 
change (similar to C++). And if you want to do that automatically you 
need to split a program into many modules (if the modules are too big 
finding errors will get complicated again). OCamls syntax is also very 
"short", so often errors are in a very different place than one might 
think. The compiler tells you a line but the real error is 31 lines 
above. Not really what I want.

And then again, couldn't it be a bit premature to judge the performance 
of Lisp after you have seen not much more than a raytracer?
Why not rewrite your raytracer in assembler and enjoy the 5x speed up?



>>Maybe ML is only useful for 
>>some specific tiny sets of pure mathematical applications, used in
>>universities?
> 
> 
> Given that my company is using OCaml for scientific computing and
> visualisation applications in industry, I think we can safely say that your
> assertion is definitely wrong.

That is great for you. Anyway, why isn't your company using ML?


André
-- 
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4307fac6$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
André Thieme wrote:
> Jon Harrop schrieb:
>>>What you did not do
>>>was to mention how the pure functional programming style can complicate
>>>things.
>> 
>> If an imperative style is better, then use an imperative style. This can
>> be done directly in ML or indirectly using monads in Haskell.
> 
> If you want to learn Lisp do it and then compare how easy and natural
> you find it to use imperative style.

But would you use an imperative style to do that in Lisp? I'd have expected
a functional style with nested definitions, like the ML.

>>>How would you implement an Unit Test Framework in ML in 16 lines (not
>>>counting comments and blank lines?) which can be used for some simple
>>>programs?
>> 
>> Can you describe what that is?
> 
> Peter already posted the link, but in the case you missed that posting:
>
http://www.gigamonkeys.com/book/practical-building-a-unit-test-framework.html

I read that page but it doesn't give a definition of the term. If my
inference is correct then I'd probably write this kind of thing using HOFs
in OCaml rather than macros. There are probably some cases that couldn't be
handled like that but I'm not sure what.

> OCaml might be nice for some tasks, but it also introduces its own
> problems, like the type inference. To keep a good overview will get 
> harder and harder cause it gets difficult to say why a type is not
> correct (when the type is a result of a complicated dependency graph).
> One could go arround that by explicitely state the types, but that will
> also get complicated cause you need to change the prototypes on every
> change (similar to C++).

You don't want to declare all types. You probably want to declare
non-trivial tuples as records and most variant types. Polymorphic variants
are basically type-inferred variant types.

> And if you want to do that automatically you 
> need to split a program into many modules (if the modules are too big
> finding errors will get complicated again). OCamls syntax is also very
> "short", so often errors are in a very difference place than one might
> think. The compiler tells you a line but the real error is 31 lines
> above. Not really what I want.

Yes. Errors can cause the wrong types to be inferred which then show up
later on. With emacs, you do C-C C-T to get the inferred type of the
current subexpression and that tells you if the error was in the
definition.

> And then again, couldn't it be a bit premature to judge the performance
> of Lisp after you have seen not much more than a raytracer?

I've looked up other results. Performance comparisons seem to erupt on this
newsgroup every so often, typically comparing with C/C++. The results are
very varied but one observation is that it takes a lot of work and know-how
to optimise Lisp code.

> Why not rewrite your raytracer in assembler and enjoy the 5x speed up?

The Lisp programs are being compared to unoptimised code in the other
languages. There are plenty of optimisations to be applied before resorting
to assembler.

> Anyway, why isn't your company using ML?

OCaml is an ML.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Peter Seibel
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m2br3rx6no.fsf@gigamonkeys.com>
Jon Harrop <······@jdh30.plus.com> writes:

> André Thieme wrote:
>> Jon Harrop schrieb:

>>>> How would you implement an Unit Test Framework in ML in 16 lines
>>>> (not counting comments and blank lines?) which can be used for
>>>> some simple programs?
>>> 
>>> Can you describe what that is?
>> 
>> Peter already posted the link, but in the case you missed that posting:
>>
> http://www.gigamonkeys.com/book/practical-building-a-unit-test-framework.html
>
> I read that page but it doesn't give a definition of the term. If my
> inference is correct then I'd probably write this kind of thing
> using HOFs in OCaml rather than macros. There are probably some
> cases that couldn't be handled like that but I'm not sure what.

Hmmm. Maybe I didn't give a definition but I gave examples of how you
would use it and the complete working code. Since, as Andre points
out, it's on the order of a dozen and half lines of code, maybe you
can take the time to write up your HOF version in OCaml--I'd be quite
curious to see what that would look like since FP folks often claim
that Lisp macros are unnecessary given higher-order functions. (Which
is strange on the face of it since Lisp has higher-order functions yet
Lisp programmers still find plenty of uses for macros.) At any rate,
this test framework is, I think, a good litmus test for the
HOFs-can-replace-macros theory since it's pretty much about pure
syntactic abstraction--if you can write a OCaml test framework that
makes it as easy to write simple unit tests for OCaml code as that
test framework does for Common Lisp, I'd love to see it. (Seriously--I
regret not knowing more about any of the purer FP languages;
learning OCaml and Haskell are on my todo list.)

-Peter


-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <4308066b$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Peter Seibel wrote:
> Hmmm. Maybe I didn't give a definition but I gave examples of who you
> would use it and the complete working code. Since, as Andre points
> out, it's on the order of a dozen and half lines of code, maybe you
> can take the time to write up your HOF version in OCaml--I'd be quite
> curious to see what that would look like since FP folks often claim
> that Lisp macros are unecessary given higher-order functions. (Which
> is strange on the face of it since Lisp has higher-order functions yet
> Lisp programmers still find plenty of uses for macros.) At any rate,
> this test framework is, I think, a good litmus test for the
> HOFs-can-replace-macros theory since it's pretty much about pure
> syntactic abstraction--if you can write a OCaml test framework that
> makes it as easy to write simple unit tests for OCaml code as that
> test framework does for Common Lisp, I'd love to see it. (Seriously--I
> regret not knowing more about any of the the purer FP languages;
> learning OCaml and and Haskell are on my todo list.)

Well, you should probably take what I'm about to say with a pinch of salt
because I don't know anything about macros (as we've just witnessed!) or
unit testing.

I would disagree with them. I don't think HOFs can replace macros. Firstly,
HOFs are not partially specialised (although I can't think why not, the
FPLs I know don't do it) and I think macros are, in a statically typed
language the unit test code may not be typeable (depending on the
language's type system) and my guess is that Lisp's eval/apply approach
facilitates more factoring, giving smaller and more reusable code.

However, I think that unit testing is probably much less needed when you
have static type checking to catch your errors. Also, whenever I have
written auxiliary code to stress test pieces of an application it has only
required ~10LOC and I've only done that a few times in my life. So although
the brevity of that Lisp solution is impressive, it is also shooting the
argument for its own existence in the foot by being so concise.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Peter Seibel
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m2y86vvqbh.fsf@gigamonkeys.com>
Jon Harrop <······@jdh30.plus.com> writes:

> Peter Seibel wrote:
>> Hmmm. Maybe I didn't give a definition but I gave examples of who you
>> would use it and the complete working code. Since, as Andre points
>> out, it's on the order of a dozen and half lines of code, maybe you
>> can take the time to write up your HOF version in OCaml--I'd be quite
>> curious to see what that would look like since FP folks often claim
>> that Lisp macros are unecessary given higher-order functions. (Which
>> is strange on the face of it since Lisp has higher-order functions yet
>> Lisp programmers still find plenty of uses for macros.) At any rate,
>> this test framework is, I think, a good litmus test for the
>> HOFs-can-replace-macros theory since it's pretty much about pure
>> syntactic abstraction--if you can write a OCaml test framework that
>> makes it as easy to write simple unit tests for OCaml code as that
>> test framework does for Common Lisp, I'd love to see it. (Seriously--I
>> regret not knowing more about any of the the purer FP languages;
>> learning OCaml and and Haskell are on my todo list.)
>
> Well, you should probably take what I'm about to say with a pinch of salt
> because I don't know anything about macros (as we've just witnessed!) or
> unit testing.
>
> I would disagree with them. I don't think HOFs can replace macros. Firstly,
> HOFs are not partially specialised (although I can't think why not, the
> FPLs I know don't do it) and I think macros are, in a statically typed
> language the unit test code may not be typeable (depending on the
> language's type system) and my guess is that Lisp's eval/apply approach
> facilitates more factoring, giving smaller and more reusable code.
>
> However, I think that unit testing is probably much less needed when you
> have static type checking to catch your errors. Also, whenever I have
> written auxiliary code to stress test pieces of an application it has only
> required ~10LOC and I've only done that a few times in my life. So although
> the brevity of that Lisp solution is impressive, it is also shooting the
> argument for its own existence in the foot by being so concise.

Uh, I'm not sure I get that last bit of logic. But nevermind. Anyway,
if you know any OCamlers (Ocamlites?) who actually believe in testing
their code for errors other than type errors and who might be up for
the challenge, please do point them to my previous post--I'd love to
see an Ocaml (or Haskell or SML or whatever) implementation of a
simple unit test framework.

-Peter

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43080b5b$0$1285$ed2619ec@ptn-nntp-reader02.plus.net>
Peter Seibel wrote:
> Uh, I'm not sure I get that last bit of logic. But nevermind. Anyway,
> if you know any OCamlers (Ocamlites?) who actually believe in testing
> their code for errors other than type errors and who might be up for
> the challenge, please do point them to my previous post--I'd love to
> see an Ocaml (or Haskell or SML or whatever) implementation of a
> simple unit test framework.

Will do. I'll see if I can work it out...

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Neelakantan Krishnaswami
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <slrndgn6si.9bm.neelk@gs3106.sp.cs.cmu.edu>
In article <··············@gigamonkeys.com>, Peter Seibel wrote:
> 
> Uh, I'm not sure I get that last bit of logic. But nevermind. Anyway,
> if you know any OCamlers (Ocamlites?) who actually believe in testing
> their code for errors other than type errors and who might be up for
> the challenge, please do point them to my previous post--I'd love to
> see an Ocaml (or Haskell or SML or whatever) implementation of a
> simple unit test framework.

Here's the signature of a very minimalist unit test framework in
Ocaml.

module type TEST = sig
  type t
  type result = Pass of string | Fail of string | Broke of string * exn

  val test : name:string -> (unit -> bool) -> t

  val test_state :
    name:string ->
    setup:(unit -> 'a) ->
    teardown:('a -> unit) -> 
    ('a -> bool) ->
    t

  val run : t -> result
  val print : Format.formatter -> result -> unit
  val go : Format.formatter -> t -> unit
end

To define a test, you use the functions test and test_state. 'test'
just takes a name and a function that returns a boolean condition:

  let t1 =
    Test.test
      ~name: "Addition test"
      (fun () -> 4 = 2 + 2)

If you want to define a test for functions that create and use some
custom state, do something like:

  let t2 = 
    Test.test_state
     ~name: "Dereferencing a pointer test"
     ~setup: (fun () -> ref 13)
     ~teardown: (fun r -> r := 0) (* Pointlessly zero out the ref cell. *)
     (fun r -> !r = 13)

The 'run' function runs a test and returns a result. A result is
either a Pass or a Fail, depending on whether the test body returns
true or false, or it can be Broken, indicating an unhandled exception
out of the test.

The 'print' function prints out a result to a stream, and 'go' is just
a convenience function that both runs and prints a test.

  # Test.go Format.std_formatter t2;; (* At the REPL *)
  pass: Dereferencing a pointer test

To be less minimalist, you'd want to have test suites (a la Kent Beck)
to treat groups of tests as a test, but but that's all easy to add. I
use something that has a few more features, but not that many more.

I use it by taking the snippets of code I enter into the REPL to see
what the functions are doing and turn them directly into my unit
tests. I imagine Scheme and CL programmers work pretty much the same
way.

Here's the implementation:

module Test : TEST =
struct
  type result = Pass of string | Fail of string | Broke of string * exn

  type t = {
    name : string;
    runner : unit -> result
  }

  let runner name setup teardown body () =
    try
      let state = setup() in
      let b = body state in
      let () = teardown state in
	if b then Pass name else Fail name
    with
	e -> Broke (name, e)

  let test ~name body =
    let noop = fun () -> () in
      {name = name; runner = runner name noop noop body}

  let test_state ~name ~setup ~teardown body =
      {name = name; runner = runner name setup teardown body}

  let run t = t.runner()

  let print out result =
    let print fmt = Format.fprintf out fmt in
      match result with
	| Pass name -> print "pass: %s\n" name
	| Fail name -> print "Fail: %s\n" name
	| Broke (name, exn) ->
	    print "BROKEN: %s threw %s\n" name (Printexc.to_string exn)

  let go out t = print out (run t)
end


-- 
Neel Krishnaswami
·····@cs.cmu.edu
From: Peter Seibel
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <m2zmr8s3z9.fsf@gigamonkeys.com>
Neelakantan Krishnaswami <·····@cs.cmu.edu> writes:

> In article <··············@gigamonkeys.com>, Peter Seibel wrote:
>> 
>> Uh, I'm not sure I get that last bit of logic. But nevermind. Anyway,
>> if you know any OCamlers (Ocamlites?) who actually believe in testing
>> their code for errors other than type errors and who might be up for
>> the challenge, please do point them to my previous post--I'd love to
>> see an Ocaml (or Haskell or SML or whatever) implementation of a
>> simple unit test framework.
>
> Here's the signature of a very minimalist unit test framework in
> Ocaml.

Interesting. I'll take a closer look at this when I have a bit more
time. Thanks.

-Peter

-- 
Peter Seibel           * ·····@gigamonkeys.com
Gigamonkeys Consulting * http://www.gigamonkeys.com/
Practical Common Lisp  * http://www.gigamonkeys.com/book/
From: André Thieme
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <deb676$aa4$1@ulric.tng.de>
Jon Harrop schrieb:
> André Thieme wrote:
> 
>>Jon Harrop schrieb:
>>
>>>>What you did not do
>>>>was to mention how the pure functional programming style can complicate
>>>>things.
>>>
>>>If an imperative style is better, then use an imperative style. This can
>>>be done directly in ML or indirectly using monads in Haskell.
>>
>>If you want to learn Lisp do it and then compare how easy and natural
>>you find it to use imperative style.
> 
> 
> But would you use an imperative style to do that in Lisp? I'd have expected
> a functional style with nested definitions, like the ML.

Would use an imperative style for what?
I just wanted to point out that programs written in ML or OCaml are
typically written in a functional style. Whenever a language is centered
on one paradigm it can get hairy when you face problems that could be
solved better by using a different one.
One thing I like about Lisp is that it is a true multi paradigm language.

Just take this simple example:

(defun foo (n)
   (lambda (i) (incf n i)))

This is a simple function that generates an accumulator function.
It is different than an adder function cause it changes the value of n.
Example:


(defvar bar (foo 10))
(funcall bar 5)  => 15
(funcall bar 2)  => 17

(defvar baz (foo -8))
(funcall baz 9)    => 1
(funcall baz 100)  => 101
(funcall baz 9)    => 110

It is different from an adder function:
(defun foo (n) (lambda (i) (+ n i)))
(setf bar (foo 10))
(funcall bar 9)  =>  19
(funcall bar 2)  =>  12


I don't know how to do it in ML, it is too long ago since I learned it.
To do this in OCaml I guess one needs to trick around with the ref type.


>>>>How would you implement an Unit Test Framework in ML in 16 lines (not
>>>>counting comments and blank lines?) which can be used for some simple
>>>>programs?
>>>
>>>Can you describe what that is?
>>
>>Peter already posted the link, but in the case you missed that posting:
>>
> 
> http://www.gigamonkeys.com/book/practical-building-a-unit-test-framework.html
> 
> I read that page but it doesn't give a definition of the term. If my
> inference is correct then I'd probably write this kind of thing using HOFs
> in OCaml rather than macros. There are probably some cases that couldn't be
> handled like that but I'm not sure what.

There is no single case that couldn't be handled by OCaml as it is
turing complete like Lisp. My question is if you get the same or even
more functionality with the same amount of work in ML or OCaml.

[Aside from that: you might be interested to do some reading about Unit
Testing Frameworks. If it is more or less work to write one in OCaml it
probably is worth the time. TDD (Test Driven Development) might speed up
the work in your company.]


>>And then again, couldn't it be a bit premature to judge the performance
>>of Lisp after you have seen not much more than a raytracer?
> 
> 
> I've looked up other results. Performance comparisons seem to erupt on this
> newsgroup every so often, typically comparing with C/C++. The results are
> very varied but one observation is that it takes a lot of work and know-how
> to optimise Lisp code.

It really isn't too easy to optimize code in Lisp. There is still some
work that can be done to get better compilers.



>>Why not rewrite your raytracer in assembler and enjoy the 5x speed up?
> 
> 
> The Lisp programs are being compared to unoptimised code in the other
> languages. There are plenty of optimisations to be applied before resorting
> to assembler.

When we did a raytracer in highly optimized C the assembler version was
eight times faster. Only the intel compiler was able to produce code
that was closer to the assembler version which still ran five times faster.


André
-- 
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43099f6d$0$17485$ed2e19e4@ptn-nntp-reader04.plus.net>
André Thieme wrote:
> Jon Harrop schrieb:
>> But would you use an imperative style to do that in Lisp? I'd have
>> expected a functional style with nested definitions, like the ML.
> 
> Would use an imperative style for what?

You asked how I would use an imperative style to share 12 variables across
several functions. My answer is that I wouldn't use an imperative style,
I'd pass them to an outer, nested function.

> I just wanted to point out that programs written in ML or OCaml are
> typically written in a functional style.

ML programs are typically written in a more functional style than C
programs, of course, but it is very easy to use imperative features. I use
both functional and imperative style all the time. Imperative style is
great for things like state-based GUIs.

> Whenever a language is centered 
> on one paradigm it can get hairy when you face problems that could be
> solved better by using a different one.

I wouldn't say that they are centered on one paradigm, but this is getting
very subjective.

> Just take this simple example:
> 
> (defun foo (n)
>    (lambda (i) (incf n i)))

This is:

# let foo n i =
    n := !n + i;;
val foo : int ref -> int -> unit = <fun>

> This is a simple function that generates an accumulator function.
> It is different than an adder function cause it changes the value of n.
> Example:
> 
> (defvar bar (foo 10))
> (funcall bar 5)  => 15
> (funcall bar 2)  => 17

# let bar = foo (ref 10);;
val bar : int -> unit = <fun>
# bar 5;;
- : unit = ()
# bar 2;;
- : unit = ()

> It is different from an adder function:
> (defun foo (n) (lambda (i) (+ n i)))
> (setf bar (foo 10))
> (funcall bar 9)  =>  19
> (funcall bar 2)  =>  12

Yes:

# let foo = ( + );;
val foo : int -> int -> int = <fun>
# let bar = foo 10;;
val bar : int -> int = <fun>
# bar 9;;
- : int = 19
# bar 2;;
- : int = 12

> I don't know how to do it in ML, it is too long ago since I learned it.
> To do this in OCaml I guess one needs to trick around with the ref type.

Exactly.

[unit testing]
>> I read that page but it doesn't give a definition of the term. If my
>> inference is correct then I'd probably write this kind of thing using
>> HOFs in OCaml rather than macros. There are probably some cases that
>> couldn't be handled like that but I'm not sure what.
> 
> There is no single case that couldn't be handeled by OCaml as it is
> turing complete like Lisp. My question is if you get the same or even
> more functionality with the same amount of work in ML or OCaml.
> 
> [Aside from that: you might be interested to do some reading about Unit
> Testing Frameworks. If it is more or less work to write one in OCmal it
> probably is worth the time. TDD (Test Driven Development) might speed up
> the work in your company.]

It may be similar to something that I'm doing ATM. I'm writing a GUI for our
presentation software. The data structures and algorithms for editing the
document are all purely functional (this has many advantages). They are
mostly proven run-time-error-free by the static type checker but there are
still places that run-time errors can occur.

Let's say some combination of key presses might break the software. So I
wrote a little program to randomly generate actions (normally generated by
the GUI to edit the document). The documents that my program then types aren't
exactly interesting, but the longer I run it, the more confidence I have
that it won't produce run-time errors when in production. Is that what you
mean by unit testing?

>> I've looked up other results. Performance comparisons seem to erupt on
>> this newsgroup every so often, typically comparing with C/C++. The
>> results are very varied but one observation is that it takes a lot of
>> work and know-how to optimise Lisp code.
> 
> It really isn't too easy to optimize code in Lisp. There is still some
> work that can be done to get better compilers.

Yes, that is the impression I get. Most people started off with 100x slower
Lisp versions of the ray tracer and optimised to about 5x slower. Only one
person managed to get within 2x slower (he got 1.2x slower!) but his code
is three times as long as the OCaml.

Now I have the problem that, if I put code on our site, which of the Lisp
programs do I choose?

>> The Lisp programs are being compared to unoptimised code in the other
>> languages. There are plenty of optimisations to be applied before
>> resorting to assembler.
> 
> When we did a raytracer in highly optimized C the assembler version was
> eight times faster. Only the intel compiler was able to produce code
> that was closer to the assembler version which still ran five times
> faster.

Wow! Ok, mine is nothing like as optimised as that. :-)

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Nathan Baum
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <dedggv$219$1@newsg2.svr.pol.co.uk>
Jon Harrop wrote:
>>Just take this simple example:
>>
>>(defun foo (n)
>>   (lambda (i) (incf n i)))
> 
> 
> This is:
> 
> # let foo n i =
>     n := !n + i;;
> val foo : int ref -> int -> unit = <fun>

Not quite. INCF also returns the incremented value, which means the foo 
function returns the incremented value. What you want is

let foo n i =
   n := !n + i; !n;;

OCaml's main Big Win here is that currying makes higher-order functions 
notably more concise.

I'd make a macro which would work as the following:

* (defcurry foo (n i)
     (incf n i))
FOO
* (foo)
#<FUNCTION FOO>
* (foo 7)
#<CLOSURE (LAMBDA (I)) {DEADBEEF}>
* (let ((f (foo 7)))
    (list (funcall f 2) (funcall f 2)))
(9 11)
* (foo 7 12)
19

> Let's say some combination of key presses might break the software. So I
> wrote a little program to randomly generate actions (normally generated by
> the GUI to edit the document). The documents that my program then types are
> exactly interesting but the longer I run it, the more confidence I have
> that it won't produce run-time errors when in production. Is that what you
> mean by unit testing?

_My_ understanding of unit testing is that one individually tests the 
_units_ of the software as you make them. Units might be functions, 
files, packages or whole systems.

The idea is to write a set of tests which ensure that a given unit 
performs the correct actions, and then make sure that when you modify a 
given unit, it and all its dependent units can be retested automatically.

You might have many thousands of units, with tens of tests each. Running 
_all_ tests every time something changes is really not practical.

But other people might mean something different by it.
From: Jamie Border
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <ddtnc2$5ia$1@nwrdmz03.dmz.ncs.ea.ibs-infra.bt.com>
"Jon Harrop" <······@jdh30.plus.com> wrote in message 
······························@ptn-nntp-reader03.plus.net...
> Jamie Border wrote:
>> "Hartmann Schaffer" <··@hartmann.schaffernet> wrote in message
>> ·······················@newscontent-01.sprint.ca...
>>> Jon Harrop wrote:
>>>> ...
>> JH>> However, LOC overly penalises Lisp and Scheme, IMHO. Specifically,
>> Lisp and
>> JH>> Scheme programs are virtually unreadable unless the parentheses are
>> JH>> staggered by spreading expressions over several lines and using an
>> JH>> automatic indenter. So if I were to put a Lisp implementation of the
>> ray
>> JH>> tracer on my site then I'd either state that, or I'd give results
>> using JH>> some other measure of verbosity, like characters.
>>
>> Hmm.  What would you (JH) be measuring here?
>>
>> a) Keystrokes required to produce the code (see below, though)
>> b) Some kind of 'intrinsic verbosity', which would require some *serious*
>> thinking about idiomaticity, relevance of formatting and massive, massive
>> sampling to make it statistically relevant.
>
> Both. As you say, it is so inherently flawed that there is little point
> wasting time thinking about it. For the time being, I don't believe LOC 
> can
> be significantly improved upon.

How would you enforce formatting for your hypothetical LOC measurements?

Compare:

(defun foo (some-number)
  (list (1- some-number)
        some-number
        (1+ some-number)))

with something like:


int* foo (int num) {int* temp = (int*)malloc(3 * sizeof(int)); temp[0] = 
num-1; temp[1] = num; temp[2] = num+1; return temp; }


Example 1 has 2 LOC.  Example 2 has 1 LOC.  Does this mean C 'wins' here?

>
>>> so character count might penalize lisp even worse.  otoh, the lengthy
>>> identifiers make lisp code quite easy to read and understand.
>>
>> Yes, and using a decent editor with auto-completion (Emacs) means that I
>> hit less keys to produce the token 'DESTRUCTURING-BIND' ( DE-B <META-TAB>
>> ) than you might think.
>>
>> Oh, and all the ')))))' you see probably didn't get typed by hand (
>> <META-RET> closes all open parens).
>
> Cool. :-)

Incidentally I argued against the verbosity of CL a while ago, when I was 
green.  Now I've written more code, I know better.

At 2am I can understand MULTIPLE-VALUE-BIND better than MVB.  Verbosity is 
good.

>
>>> token count probably would be better
>>
>> Yep, although (because I am biased) I would like to see
>> 'keystroke/mouse-click' count instead.  I think that with the requirement
>> for idiomatic variable naming, CL might not come out as 'verbose' as you
>> think...
>
> Is Lisp code not made less maintainable because of all those brackets?

No.  It is *more* maintainable.  I am new (a few months) to CL, and I can 
fix my code faster* than with C (seven years).

Difficult to explain really, but I get a visceral sense of 'stack pop' as I 
type the closing parens.

This is probably due to Emacs highlighting each open-paren that my typed 
close-paren balances.

What I am trying to say is that I get this feeling for the *whole* language, 
whereas with C, i see curly braces, square brackets, periods, 
hypen-greater-than, ampersand, etc, etc.

My thinking goes "erm-close-square-bracket erm-close-paren erm-semicolon" 
instead of "done-done-done"

Of course, it would be absurd to suggest that C code is less maintainable 
because of all that _syntax_...  Wouldn't it?  :-)

Jamie

* This is a lie: sometimes I just stare at it for hours, because I am still 
adjusting to the CL way of thinking.  But this happens less and less often. 
If I compare with the transition from C++ to C# (the most recent addition, I 
guess), I am winning *big time* with Common Lisp.

>
> -- 
> Dr Jon D Harrop, Flying Frog Consultancy
> http://www.ffconsultancy.com 
From: Greg Buchholz
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124236612.958412.249520@f14g2000cwb.googlegroups.com>
Jamie Border wrote:
>int* foo (int num) {int* temp = (int*)malloc(3 * sizeof(int); temp[0] = num-1; temp[1] = num; temp[2] = num+1; return temp; }

Should be able to chop a few characters off of that...

int* foo(int n)
{memcpy(malloc(3*sizeof(int)),(int[]){n-1,n,n+1},3*sizeof(int));}
From: Jamie Border
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <ddutlf$se4$1@nwrdmz03.dmz.ncs.ea.ibs-infra.bt.com>
"Greg Buchholz" <················@yahoo.com> wrote in message 
·····························@f14g2000cwb.googlegroups.com...
> Jamie Border wrote:
>>int* foo (int num) {int* temp = (int*)malloc(3 * sizeof(int); temp[0] = 
>>num-1; temp[1] = num; temp[2] = num+1; return temp; }
>
> Should be able to chop a few characters off of that...

Yes, but I was trying to lose in every way that matters (# tokens, # 
characters, amount of syntax, etc), and win in the one way that makes no 
odds (# LOC).

>
> int* foo(int n)
> {memcpy(malloc(3*sizeof(int)),(int[]){n-1,n,n+1},3*sizeof(int));}
>

That notwithstanding, the above is very neat :-)

Jamie 
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43026f49$0$17486$ed2e19e4@ptn-nntp-reader04.plus.net>
Jamie Border wrote:
> How would you enforce formatting for your hypothetical LOC measurements?

I would make no attempt to do so beyond disciplining myself to write in a
natural style.

> Compare:
> 
> (defun foo (some-number)
>   (list (1- some-number)
>         some-number
>         (1+ some-number)))
> 
> with something like:
> 
> int* foo (int num) {int* temp = (int*)malloc(3 * sizeof(int); temp[0] =
> num-1; temp[1] = num; temp[2] = num+1; return temp; }
> 
> 
> Example 1 has 2 LOC.  Example 2 has 1 LOC.  Does this mean C 'wins' here?

Yes, exactly. But the C isn't in my natural style.

The problem with Lisp and Scheme is that I am a total newbie and have no
idea how Lisp code is normally formatted. C++ and OCaml I know. SML I
thought I knew but had a big debate on c.l.functional about it. And I
assumed that Java = C++ for formatting.

>> Is Lisp code not made less maintainable because of all those brackets?
> 
> No.  It is *more* maintainable.  I am new (a few months) to CL, and I can
> fix my code faster* than with C (seven years).

Sure. I am in the same situation with C++ and OCaml. But I want to know how
ML and Lisp compare. Syntax is clearly one of the major differences between
Lisp and ML.

> Difficult to explain really, but I get a visceral sense of 'stack pop' as
> I type the closing parens.

Interesting. :-)

> This is probably due to Emacs highlighting each open-paren that my typed
> close-paren balances.

Yep. That's a real life saver.

> My thinking goes "erm-close-squre-bracket erm-close-paren erm-semicolon"
> instead of "done-done-done"
> 
> Of course, it would be absurd to suggest that C code is less maintainable
> because of all that _syntax_...  Wouldn't it?  :-)

No, not at all. Both sides of the argument have valid justifications, IMHO.

> * This is a lie: sometimes I just stare at it for hours, because I am
> still
> adjusting to the CL way of thinking.  But this happens less and less
> often. If I compare with the transition from C++ to C# (the most recent
> addition, I guess), I am winning *big time* with Common Lisp.

I can well believe that.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Jamie Border
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <ddutgk$sd3$1@nwrdmz02.dmz.ncs.ea.ibs-infra.bt.com>
"Jon Harrop" <······@jdh30.plus.com> wrote in message 
······························@ptn-nntp-reader04.plus.net...
> Jamie Border wrote:
>> How would you enforce formatting for your hypothetical LOC measurements?
>
> I would make no attempt to do so beyond disciplining myself to write in a
> natural style.
>
>> Compare:
>>
>> (defun foo (some-number)
>>   (list (1- some-number)
>>         some-number
>>         (1+ some-number)))
>>
>> with something like:
>>
>> int* foo (int num) {int* temp = (int*)malloc(3 * sizeof(int); temp[0] =
>> num-1; temp[1] = num; temp[2] = num+1; return temp; }
>>
>>
>> Example 1 has 2 LOC.  Example 2 has 1 LOC.  Does this mean C 'wins' here?
>
> Yes, exactly. But the C isn't in my natural style.

My point exactly.  Whose 'style' should be used for a LOC comparison?

I could format source equally badly for any other language, thereby fatally 
skewing your analyses.

How do you seek to avoid this?


>
> The problem with Lisp and Scheme is that I am a total newbie and have no
> idea how Lisp code is normally formatted. C++ and OCaml I know. SML I
> thought I knew but had a big debate on c.l.functional about it. And I
> assumed that Java = C++ for formatting.
>
>>> Is Lisp code not made less maintainable because of all those brackets?
>>
>> No.  It is *more* maintainable.  I am new (a few months) to CL, and I can
>> fix my code faster* than with C (seven years).
>
> Sure. I am in the same situation with C++ and OCaml. But I want to know 
> how
> ML and Lisp compare. Syntax is clearly one of the major differences 
> between
> Lisp and ML.
>
>> Difficult to explain really, but I get a visceral sense of 'stack pop' as
>> I type the closing parens.
>
> Interesting. :-)
>
>> This is probably due to Emacs highlighting each open-paren that my typed
>> close-paren balances.
>
> Yep. That's a real life saver.
>
>> My thinking goes "erm-close-squre-bracket erm-close-paren erm-semicolon"
>> instead of "done-done-done"
>>
>> Of course, it would be absurd to suggest that C code is less maintainable
>> because of all that _syntax_...  Wouldn't it?  :-)
>
> No, not at all. Both sides of the argument have valid justifications, 
> IMHO.

Indeed.  I think probably this statement is the problem I have with the 
'shootout'.

If your comparison was telling me:

* For low-level hardware access -  ASM, C,...
* ...
* For high-level nondeterministic programming: Prolog,Lisp,...

I would probably be experiencing less friction.


>
>> * This is a lie: sometimes I just stare at it for hours, because I am
>> still
>> adjusting to the CL way of thinking.  But this happens less and less
>> often. If I compare with the transition from C++ to C# (the most recent
>> addition, I guess), I am winning *big time* with Common Lisp.
>
> I can well believe that.
>
> -- 
> Dr Jon D Harrop, Flying Frog Consultancy
> http://www.ffconsultancy.com 
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <43034cbd$0$97124$ed2619ec@ptn-nntp-reader03.plus.net>
Jamie Border wrote:
> "Jon Harrop" <······@jdh30.plus.com> wrote in message
> ······························@ptn-nntp-reader04.plus.net...
>> Yes, exactly. But the C isn't in my natural style.
> 
> My point exactly.  Whose 'style' should be used for a LOC comparison?

For my language comparison, my style.

> I could format source equally badly for any other language, thereby
> fatally skewing your analyses.

I would reformat it.

> How do you seek to avoid this?

Just common sense. I don't believe this can be significantly improved upon
so there is no point in over analysing it.

>>> Of course, it would be absurd to suggest that C code is less
>>> maintainable
>>> because of all that _syntax_...  Wouldn't it?  :-)
>>
>> No, not at all. Both sides of the argument have valid justifications,
>> IMHO.
> 
> Indeed.  I think probably this statement is the problem I have with the
> 'shootout'.

My main objection to the shootout is that most of the tests are trivially
reducible. So the optimisations performed by compilers have a huge effect on
performance but are mostly unrelated to the optimisation of real programs.

> If your comparison was telling me:
> 
> * For low-level hardware access -  ASM, C,...
> * ...
> * For high-level nondeterministic programming: Prolog,Lisp,...
> 
> I would probably be experiencing less friction.

My language comparison really only looks at the middle of the clarity vs
performance trade-off in the context of non-trivial algorithms.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com
From: Hartmann Schaffer
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <HOuMe.1658$Dd.7029@newscontent-01.sprint.ca>
Jamie Border wrote:
> "Hartmann Schaffer" <··@hartmann.schaffernet> wrote in message 
> ·······················@newscontent-01.sprint.ca...
> 
>>Jon Harrop wrote:
>>
>>>...
> 
> JH>> However, LOC overly penalises Lisp and Scheme, IMHO. Specifically, Lisp 
> and
> JH>> Scheme programs are virtually unreadable unless the parentheses are
> JH>> staggered by spreading expressions over several lines and using an
> JH>> automatic indenter. So if I were to put a Lisp implementation of the 
> ray
> JH>> tracer on my site then I'd either state that, or I'd give results using
> JH>> some other measure of verbosity, like characters.
> 
> Hmm.  What would you (JH) be measuring here?
> 
> a) Keystrokes required to produce the code (see below, though)
> b) Some kind of 'intrinsic verbosity', which would require some *serious* 
> thinking about idiomaticity, relevance of formatting and massive, massive 
> sampling to make it statistically relevant.
> 
> 
> 
>>i doubt lisp or scheme will gain anything there:  the language defined 
>>words tend to be quite lengthy, and afaict that seems to encourage 
>>programmers to use pretty length identifiers for their own identifiers,
> 
> 
> Yes
> 
> 
>>so character count might penalize lisp even worse.  otoh, the lengthy 
>>identifiers make lisp code quite easy to read and understand.
> 
> 
> Yes, and using a decent editor with auto-completion (Emacs) means that I hit 
> less keys to produce the token 'DESTRUCTURING-BIND' ( DE-B <META-TAB> ) than 
> you might think.
> 
> Oh, and all the ')))))' you see probably didn't get typed by hand ( 
> <META-RET> closes all open parens).
> 
> 
>>token count probably would be better
> 
> 
> Yep, although (because I am biased) I would like to see 
> 'keystroke/mouse-click' count instead.  I think that with the requirement 
> for idiomatic variable naming, CL might not come out as 'verbose' as you 
> think...

i was considering pointing out that most lisp development environments 
have features that would reduce the amount of keystrokes required to 
type in the program, but decided against it because i suspect that that 
is harder to measure than a token count

hs
From: Jamie Border
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <dduu2r$hr$1@nwrdmz02.dmz.ncs.ea.ibs-infra.bt.com>
"Hartmann Schaffer" <··@hartmann.schaffernet> wrote in message 
·······················@newscontent-01.sprint.ca...
[snip]

JB>> Yes, and using a decent editor with auto-completion (Emacs) means that 
I hit
JB>> less keys to produce the token 'DESTRUCTURING-BIND' ( DE-B <META-TAB> ) 
than
JB>> you might think.

[snip]

> i was considering pointing out that most lisp development environments 
> have features that would reduce the amount of keystrokes required to type 
> in the program, but decided against it because i suspect that that is 
> harder to measure than a token count

It undoubtedly is, but I would find it _very_ interesting to compare the 
following environments in this way:

Common Lisp + Emacs + SLIME
Java + Eclipse
C# + Visual Studio.NET
C++ + Visual Studio 6

Perhaps it is more-or-less meaningless to compare the language alone without 
a good development tool?

For instance, although I could write C# in Notepad, I wouldn't want to.

Jamie

>
> hs 
From: Matthias Buelow
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <3m9r2qF15in5fU1@news.dfncis.de>
Ulrich Hobelmann <···········@web.de> wrote:

>I wouldn't consider 5 times as slow as a *functional* language very 
>competitive, but it might be fast enough for many problems.

For a runtime typed language vs. a statically typed one it is quite
competitive. Generally, the compiler can never resolve all the runtime
typing at compile time, unless you declare all and everything in your
program (which would be nothing but a major PITA).

mkb.
From: ·············@antenova.com
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124095431.560697.307610@f14g2000cwb.googlegroups.com>
Raffael Cavallaro wrote:
> On 2005-08-13 07:18:40 -0400, Jon Harrop <······@jdh30.plus.com> said:
>
> > Here is Nathan Baum's port for CMUCL and SBCL:
>
> just as an additional data point, this code runs in just over 6 seconds
> in sbcl 0.9.3 on a dual 2.0 GHz G5 (though sbcl only uses one
> processor).

I've also written a version of Jon's raytracer benchmark.  Unlike the
one given here I've used "simple-vector", to do the vectors.

I've also being using GCL to compile it so far.

It will be interesting to see how the performance compares.
From: Rob Thorpe
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1124181074.971832.182880@g14g2000cwa.googlegroups.com>
·············@antenova.com wrote:
> Raffael Cavallaro wrote:
> > On 2005-08-13 07:18:40 -0400, Jon Harrop <······@jdh30.plus.com> said:
> >
> > > Here is Nathan Baum's port for CMUCL and SBCL:
> >
> > just as an additional data point, this code runs in just over 6 seconds
> > in sbcl 0.9.3 on a dual 2.0 GHz G5 (though sbcl only uses one
> > processor).
>
> I've also written a version of Jon's raytracer benchmark.  Unlike the
> one given here I've used "simple-vector", to do the vectors.
>
> I've also being using GCL to compile it so far.
>
> It will be interesting to see how the performance compares.

Here is my version.  For some reason it doesn't work, the "g" variable
sticks at 0.5.  Have I missed something obvious, can anyone see what's
wrong with it?


; Jon Harrop's little raytracer
; With bits from Jan Van Lint's version and Nathan Baum's version
; Currently not working

;; Offset used to push shadow-ray origins off a surface, avoiding
;; self-intersection ("surface acne").
;; NOTE(review): the scene vectors in MAIN are single-float literals, so a
;; delta derived from LONG-FLOAT-EPSILON may be far too small here -- the
;; follow-up in this thread suggests a much larger value; confirm.
(defconstant delta (sqrt long-float-epsilon))
;; Sentinel distance meaning "no intersection".
(defconstant infinity most-positive-long-float)

;; Scale vector R by scalar S, returning a fresh simple-vector.
(defun v* (s r)
  (map 'simple-vector (lambda (x) (* s x)) r))
;; Element-wise sum of vectors A and B as a fresh simple-vector.
(defun v+ (a b)
  (map 'simple-vector #'+ a b))
;; Element-wise difference A - B as a fresh simple-vector.
(defun v- (a b)
  (map 'simple-vector #'- a b))
;; Dot product of vectors A and B.
;; Uses REDUCE rather than APPLY so the result does not depend on
;; CALL-ARGUMENTS-LIMIT for long vectors; REDUCE of #'+ over an empty
;; list still yields 0, matching the original behavior.
(defun dot (a b)
  (reduce #'+ (map 'list #'* a b)))
;; Normalise vector R to unit length (scale by 1/|R|).
(defun unitise (r)
  (v* (/ (sqrt (dot r r))) r))

;; A ray: ORIG is the origin point, DIR the direction vector
;; (built with UNITISE by the callers in this file).
(defstruct ray orig dir)
;; A sphere with CENTER vector and radius RAD.
(defstruct sphere center rad)
;; A group node: a bounding SPHERE enclosing a list of child SCENES.
(defstruct group sphere scenes)

;; Distance along RAY to its nearest forward intersection with sphere SPH,
;; or INFINITY if the ray misses (or the sphere lies entirely behind it).
(defun ray-sphere (ray sph)
  (let* ((v (v- (sphere-center sph) (ray-orig ray)))
         (b (dot v (ray-dir ray)))
         (disc (+ (- (* b b) (dot v v)) (expt (sphere-rad sph) 2))))
    (if (minusp disc)
        infinity                        ; no real roots: miss
        (let* ((root (sqrt disc))
               (t2 (+ b root)))         ; far intersection
          (if (minusp t2)
              infinity                  ; both hits behind the origin
              (let ((t1 (- b root)))    ; near intersection
                (if (plusp t1) t1 t2)))))))

;; Find the nearest intersection of RAY with SCENE.  Returns a cons of
;; (distance . surface-normal); the distance is INFINITY on a miss, in
;; which case the normal is the zero vector.
(defun intersect (ray scene)
  (labels ((lp (hit scene)
             ;; HIT is the best (lam . normal) found so far.
             (cond ((sphere-p scene)
                    (let ((lam (ray-sphere ray scene)))
                      (if (>= lam (car hit))
                          hit
                          (cons lam
                                (unitise
                                 (v- (v+ (ray-orig ray)
                                         (v* lam (ray-dir ray)))
                                     (sphere-center scene)))))))
                   ((group-p scene)
                    ;; Cheap bounding-sphere test first; only descend into
                    ;; the children when it could beat the current best.
                    (if (>= (ray-sphere ray (group-sphere scene)) (car hit))
                        hit
                        (reduce #'lp (group-scenes scene) :initial-value hit)))
                   (t (error "not a group or sphere in intersect")))))
    (lp (cons infinity #(0.0 0.0 0.0)) scene)))

;; Shade the point where RAY first hits SCENE under directional LIGHT.
;; Returns 0.0 for a miss, a back-facing surface, or a shadowed point;
;; otherwise the (positive) Lambertian intensity (- g).
(defun ray-trace (light ray scene)
  (let* ((hit (intersect ray scene))
         (lam (car hit))
         (normal (cdr hit)))
    (if (= lam infinity)
        0.0                             ; ray hit nothing
        ;; BUG FIX: the original read (let (g (dot normal light)) ...),
        ;; which binds G to NIL rather than to the dot product -- the
        ;; reason every pixel "sticks at 0.5".  The binding needs the
        ;; extra parentheses: (let ((g ...)) ...).
        (let ((g (dot normal light)))
          (if (plusp g)
              0.0                       ; surface faces away from the light
              ;; Cast a shadow ray from just above the surface (DELTA
              ;; offset avoids self-intersection).
              (let ((p (v+ (v+ (ray-orig ray) (v* lam (ray-dir ray)))
                           (v* delta normal))))
                (if (< (car (intersect (make-ray :orig p :dir (v* -1.0 light))
                                       scene))
                       infinity)
                    0.0                 ; in shadow
                    (- g))))))))

;; Build a "sphereflake" scene of depth N centred at C with radius R:
;; a sphere plus, when N > 1, four recursively smaller children, all
;; wrapped in a group with a bounding sphere of radius 3R.
(defun create (n c r)
  (let ((obj (make-sphere :center c :rad r)))
    (if (= n 1)
        obj
        (let ((rt (* 3.0 (/ r (sqrt 12.0)))))
          (flet ((child (x z)
                   (create (1- n) (v+ c (vector x rt z)) (* 0.5 r))))
            (make-group :sphere (make-sphere :center c :rad (* 3.0 r))
                        :scenes (list obj
                                      (child (- rt) (- rt))
                                      (child rt (- rt))
                                      (child (- rt) rt)
                                      (child rt rt))))))))

(defun main ()
  ;; Render the scene as a binary PGM ("P5") image on *TERMINAL-IO*:
  ;; a level-6 sphereflake, 512x512 pixels, 4x4 supersampling.
  ;; NOTE(review): WRITE-BYTE requires a binary stream, and *TERMINAL-IO*
  ;; is a character stream -- this likely needs a binary file stream to
  ;; work portably; confirm against the implementation used.
  (let* ((level 6)
	 (n 512) (ss 4) (light (unitise #(-1.0 -3.0 2.0)))
	 (scene (create level #(0.0 -1.0 0.0) 1)))
    ;; PGM header: magic, width, height, max grey value.
    (format *terminal-io* "P5~%~D ~D ~%255~%" n n)
    ;; Scan rows top-to-bottom, columns left-to-right.
    (do ((y (1- n) (1- y))) ((< y 0))
      (do ((x 0 (1+ x))) ((>= x n))
	(let ((g 0))
	  ;; Accumulate SS x SS supersamples for this pixel.
	  (dotimes (dx ss)
	    (dotimes (dy ss)
	      ;; AUX maps a pixel coordinate plus sub-sample offset into
	      ;; camera space (centred on the image).
	      (let* ((aux (lambda (x d) (+ (- x (/ n 2)) (/ d ss))))
		     (d (unitise (vector (funcall aux x dx)
					 (funcall aux y dy) n))))
		(incf g (ray-trace
			 light
			 (make-ray :orig #(0.0 0.0 -4.0) :dir d) scene)))))
	  ;; Average the samples, scale to 0..255, round via +0.5/floor.
	  (setq g (+ 0.5 (* 255.0 (/ g (* ss ss)))))
	  (write-byte (floor g) *terminal-io*))))))
From: Joe Marshall
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <1x4uukos.fsf@ccs.neu.edu>
"Rob Thorpe" <·············@antenova.com> writes:

> Here is my version.  For some reason it doesn't work, the "g" variable
> sticks at 0.5.  Have I missed something obvious, can anyone see what's
> wrong with it?

Try setting delta to something *much* larger than long-float-epsilon.
From: Jon Harrop
Subject: Re: Very poor Lisp performance
Date: 
Message-ID: <42fd65a5$0$1317$ed2619ec@ptn-nntp-reader02.plus.net>
Jon Harrop wrote:
> ... when
> I try to run their programs with either CMUCL or SBCL they are two orders
> of magnitude slower.

Slight correction - CMUCL is only 1 order of magnitude slower.

Also, I just tried running the ackermann and harmonic tests from the
shootout and they run at the same (fast) speed on my machine as on the
shootout's machine. So it seems the problem is specific to raytracing-like
code.

I'm completely stumped.

-- 
Dr Jon D Harrop, Flying Frog Consultancy
http://www.ffconsultancy.com