8 changes: 4 additions & 4 deletions .github/workflows/ci.yml
@@ -51,9 +51,9 @@ jobs:
fpm-version: "v0.10.1"
- uses: actions/checkout@v4
- name: Compile
-run: fpm build --profile debug
+run: fpm build --profile debug --flag -ffree-line-length-none
- name: Test
-run: fpm test --profile debug
+run: fpm test --profile debug --flag -ffree-line-length-none

gnu-fpm-release:
name: gnu-fpm-release
@@ -64,6 +64,6 @@ jobs:
fpm-version: "v0.10.1"
- uses: actions/checkout@v4
- name: Compile
-run: fpm build --profile release
+run: fpm build --profile release --flag -ffree-line-length-none
- name: Test
-run: fpm test --profile release
+run: fpm test --profile release --flag -ffree-line-length-none
16 changes: 16 additions & 0 deletions README.md
@@ -93,6 +93,20 @@ in parallel, respectively:
fpm build --compiler caf --profile release --flag "-cpp -DPARALLEL"
```

An experimental capability exists for parallel runs when building with LLVM `flang-new`
version 22 or later and [Caffeine](https://go.lbl.gov/caffeine). Steps for installing
LLVM 22.0.0git (the llvm-project main branch as of this writing) and Caffeine are
outlined in [parallel-testing-with-flang.md]. Once installed, an `fpm` command of the
following form should launch the neural-fortran test suite with two executing images:

```
GASNET_PSHM_NODES=2 \
fpm test \
--compiler flang-new \
--flag "-O3 -fcoarray -DPARALLEL" \
--link-flag "-lcaffeine -lgasnet-smp-seq -L<caffeine-install-prefix>/lib -L<gasnet-install-prefix>/lib"
```
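
To verify that the requested number of images actually launches, a minimal coarray program (a sketch for illustration, not part of this repository) can be built with the same compiler, flags, and link flags; it should report the count set by `GASNET_PSHM_NODES`:

```
program count_images
  ! Minimal sanity check: image 1 reports how many images are running.
  implicit none
  if (this_image() == 1) print '(a,i0)', 'number of images: ', num_images()
end program count_images
```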

#### Testing with fpm

```
@@ -305,3 +319,5 @@ group.
Neural-fortran has been used successfully in over a dozen published studies.
See all papers that cite it
[here](https://scholar.google.com/scholar?cites=7315840714744905948).

[parallel-testing-with-flang.md]: https://github.com/BerkeleyLab/julienne/blob/e9f7ea8069206bfc4abf6a9e6dbbd7d07bda075a/doc/parallel-testing-with-flang.md
3 changes: 3 additions & 0 deletions fpm.toml
@@ -5,5 +5,8 @@ author = "Milan Curcic"
maintainer = "[email protected]"
copyright = "Copyright 2018-2025, neural-fortran contributors"

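# Test-only dependency: Julienne supplies the unit-testing harness used by test/driver.f90.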
[dev-dependencies]
julienne = {git = "https://github.com/berkeleylab/julienne", tag = "3.2.1"}

[preprocess]
[preprocess.cpp]
11 changes: 11 additions & 0 deletions test/driver.f90
@@ -0,0 +1,11 @@
program test_suite_driver
use julienne_m, only : test_fixture_t, test_harness_t
use linear_2d_layer_test_m, only : linear_2d_layer_test_t
implicit none

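! Each test_fixture_t wraps one test type; report_results runs every test and reports the results.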
associate(test_harness => test_harness_t([ &
test_fixture_t(linear_2d_layer_test_t()) &
]))
call test_harness%report_results
end associate
end program test_suite_driver
120 changes: 120 additions & 0 deletions test/linear_2d_layer_test_m.f90
@@ -0,0 +1,120 @@
#include "language-support.F90"
! This include and the macros below are only required for gfortran versions older than 14.3
! because those versions lacked a Fortran 2008 feature that facilitates more concise code.

module linear_2d_layer_test_m
use nf_linear2d_layer, only: linear2d_layer
use julienne_m, only : &
test_t, test_description_t, test_diagnosis_t, test_result_t &
,operator(.equalsExpected.), operator(//), operator(.approximates.), operator(.within.), operator(.also.), operator(.all.)
#if ! HAVE_PROCEDURE_ACTUAL_FOR_POINTER_DUMMY
use julienne_m, only : diagnosis_function_i
#endif
implicit none

type, extends(test_t) :: linear_2d_layer_test_t
contains
procedure, nopass :: subject
procedure, nopass :: results
end type

contains

pure function subject() result(test_subject)
character(len=:), allocatable :: test_subject
test_subject = 'A linear_2d_layer'
end function

#if HAVE_PROCEDURE_ACTUAL_FOR_POINTER_DUMMY

function results() result(test_results)
type(linear_2d_layer_test_t) linear_2d_layer_test
type(test_result_t), allocatable :: test_results(:)
test_results = linear_2d_layer_test%run( &
[test_description_t('updating gradients', check_gradient_updates) &
])
end function

#else
! Work around a missing Fortran 2008 feature that was added to gfortran in version 14.3

function results() result(test_results)
type(linear_2d_layer_test_t) linear_2d_layer_test
type(test_result_t), allocatable :: test_results(:)
procedure(diagnosis_function_i), pointer :: &
check_gradient_updates_ptr => check_gradient_updates

test_results = linear_2d_layer_test%run( &
[test_description_t('updating gradients', check_gradient_updates_ptr) &
])
end function

#endif


function check_gradient_updates() result(test_diagnosis)
type(test_diagnosis_t) test_diagnosis

real :: input(3, 4) = reshape([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.11, 0.12], [3, 4])
real :: gradient(3, 2) = reshape([0.0, 10., 0.2, 3., 0.4, 1.], [3, 2])
type(linear2d_layer) :: linear
real, pointer :: w_ptr(:)
real, pointer :: b_ptr(:)

integer :: num_parameters
real, allocatable :: parameters(:) ! sized at run time from the weight and bias pointers
real :: expected_parameters(10) = [&
0.100000001, 0.100000001, 0.100000001, 0.100000001, 0.100000001, 0.100000001, 0.100000001, 0.100000001,&
0.109999999, 0.109999999&
]
real :: gradients(10)
real :: expected_gradients(10) = [&
1.03999996, 4.09999990, 7.15999985, 1.12400007, 0.240000010, 1.56000006, 2.88000011, 2.86399961,&
10.1999998, 4.40000010&
]
real :: updated_parameters(10)
real :: updated_weights(8)
real :: updated_biases(2)
real :: expected_weights(8) = [&
0.203999996, 0.509999990, 0.816000044, 0.212400019, 0.124000005, 0.256000012, 0.388000011, 0.386399955&
]
real :: expected_biases(2) = [1.13000000, 0.550000012]

integer :: i
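! A zero tolerance makes .approximates. ... .within. demand exact equality with the single-precision literals above.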
real, parameter :: tolerance = 0.

linear = linear2d_layer(out_features=2)
call linear % init([3, 4])
linear % weights = 0.1
linear % biases = 0.11
call linear % forward(input)
call linear % backward(input, gradient)
num_parameters = linear % get_num_params()

test_diagnosis = (num_parameters .equalsExpected. 10) // " (number of parameters)"

call linear % get_params_ptr(w_ptr, b_ptr)
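! Assemble a flat parameter vector: weights first, then biases.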
allocate(parameters(size(w_ptr) + size(b_ptr)))
parameters(1:size(w_ptr)) = w_ptr
parameters(size(w_ptr)+1:) = b_ptr
test_diagnosis = test_diagnosis .also. (.all. (parameters .approximates. expected_parameters .within. tolerance) // " (parameters)")

gradients = linear % get_gradients()
test_diagnosis = test_diagnosis .also. (.all. (gradients .approximates. expected_gradients .within. tolerance) // " (gradients)")

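! Nudge each parameter by 0.1 times its gradient; the updated values are written back through the pointers below.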
do i = 1, num_parameters
updated_parameters(i) = parameters(i) + 0.1 * gradients(i)
end do

call linear % get_params_ptr(w_ptr, b_ptr) ! re-fetch pointers and write the perturbed parameters back into the layer
w_ptr = updated_parameters(1:size(w_ptr))
b_ptr = updated_parameters(size(w_ptr)+1:)
updated_weights = reshape(linear % weights, shape(expected_weights))
test_diagnosis = test_diagnosis .also. (.all. (updated_weights .approximates. expected_weights .within. tolerance) // " (updated weights)")

updated_biases = linear % biases
test_diagnosis = test_diagnosis .also. (.all. (updated_biases .approximates. expected_biases .within. tolerance) // " (updated biases)")

end function

end module linear_2d_layer_test_m