Using Skala in Fortran with FTorch#

This page provides an example of how to use Skala in Fortran with the FTorch library. The example demonstrates how to load a Skala model, prepare input features, compute the exchange-correlation energy and potential, and access the results.

Setting up development environment#

For this example, we will be using mamba to manage our environment and dependencies.

environment.yml#
name: ftorch-dev
channels:
  - conda-forge
dependencies:
  # build requirements
  - fortran-compiler
  - c-compiler
  - cxx-compiler
  - cmake >=3.20,<4
  - ninja
  # host/runtime requirements
  - huggingface_hub
  - pytorch >=2.0 cpu_*

Create the environment and install dependencies:

mamba env create -n skala-dev -f environment.yml
mamba activate skala-dev

Build system setup#

For building the Fortran application, we will use CMake. We will use the following directory structure and files:

skala/
├── CMakeLists.txt
├── environment.yml
├── app/
│   └── main.f90
├── cmake/
│   ├── skala-dep-versions.cmake
│   └── skala-ftorch.cmake
└── src/
    ├── skala_ftorch.cxx
    └── skala_ftorch.f90

For the main CMakeLists.txt file, we will set up the project and include the necessary CMake modules for Skala and FTorch:

CMakeLists.txt#
# Top-level build script for the Skala/FTorch example application.
cmake_minimum_required(VERSION 3.20 FATAL_ERROR)

project("Skala" LANGUAGES Fortran C CXX)

include(GNUInstallDirs)
include(FetchContent)
# Avoid re-downloading FetchContent dependencies on every reconfigure.
set(FETCHCONTENT_UPDATES_DISCONNECTED ON CACHE BOOL "Disable FC Updates")

# Make the project-local cmake/ modules visible to include() below.
list(PREPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)

include(skala-dep-versions)
include(skala-ftorch)
# LibTorch is needed directly by the C++ bindings in src/skala_ftorch.cxx.
find_package(Torch REQUIRED)

add_executable(
  ${PROJECT_NAME}
  app/main.f90
  src/skala_ftorch.f90
  src/skala_ftorch.cxx
)
target_link_libraries(
  ${PROJECT_NAME}
  FTorch::ftorch
  ${TORCH_LIBRARIES}
)

install(
  TARGETS
  ${PROJECT_NAME}
  DESTINATION
  ${CMAKE_INSTALL_BINDIR}
)

# Undo the PREPEND above so the module path does not leak to parent scopes.
list(REMOVE_AT CMAKE_MODULE_PATH 0)

To ensure that we have the correct versions of our dependencies, we will include a CMake module that specifies the versions of Skala and FTorch:

cmake/skala-dep-versions.cmake#
# Pinned FTorch release archive and its SHA256 checksum, consumed by
# skala-ftorch.cmake when FTorch has to be built from source.
set(Skala_FTorch_URL "https://github.com/Cambridge-ICCS/FTorch/archive/refs/tags/v1.0.0.tar.gz")
set(Skala_FTorch_SHA256 "c4b6741e582623b7ecaecd59d02f779e8a6f6017f8068c85da8a034f468df375")

Next, we will include the CMake modules for Skala and FTorch, which will handle finding the libraries and setting up the necessary include directories and link targets:

cmake/skala-ftorch.cmake#
# Locate FTorch, falling back to a pinned source build via FetchContent.
if(NOT DEFINED Skala_FTorch_URL)
  include(skala-dep-versions)
endif()
find_package(FTorch QUIET)
if(NOT FTorch_FOUND)
  include(FetchContent)

  message(STATUS "Could not find FTorch... Building FTorch from source")
  message(STATUS "FTorch URL: ${Skala_FTorch_URL}")

  FetchContent_Declare(
    ftorch
    URL ${Skala_FTorch_URL}
    URL_HASH SHA256=${Skala_FTorch_SHA256}
    DOWNLOAD_EXTRACT_TIMESTAMP ON
  )
  FetchContent_MakeAvailable(ftorch)
endif()

FTorch addons#

We create a number of extensions to the FTorch library in the src/ directory, which provide the necessary Fortran bindings to interact with Skala. However, we will not go into the details of these files here, as they are primarily focused on the Fortran-C++ interoperability.

Fortran bindings for Skala

The files skala_ftorch.cxx and skala_ftorch.f90 contain the C++ and Fortran code, respectively, that define the bindings between Skala and FTorch. These files include functions for loading Skala models, preparing input features, and computing exchange-correlation energies and potentials.

src/skala_ftorch.cxx#
#include <torch/script.h>
#include <torch/csrc/autograd/autograd.h>

// Opaque C handles exposed to Fortran; each wraps a heap-allocated C++ object.
typedef void* torch_jit_script_module_t;  // torch::jit::script::Module*
typedef void* torch_tensor_t;             // torch::Tensor* / at::Tensor*
typedef int torch_device_t;

typedef void* skala_dict_t;  // SkalaDict*
typedef void* skala_list_t;  // SkalaList*

typedef c10::Dict<std::string, std::vector<at::Tensor>> SkalaDict;
typedef std::vector<at::Tensor> SkalaList;
// Feature identifiers shared with the Fortran side; the integer values must
// stay in sync with skala_feature_enum in skala_ftorch.f90.
typedef enum SkalaFeature {
  Feature_Density = 1,
  Feature_Grad = 2,
  Feature_Kin = 3,
  Feature_GridCoords = 4,
  Feature_GridWeights = 5,
  Feature_Coarse0AtomicCoords = 6
} SkalaFeature;

// Report a fatal error: print `message` to stderr, run the optional
// `cleanup` action, then terminate the process with a failure status.
// This function never returns.
static inline
void
ctorch_error(const std::string &message,
             const std::function<void()> &cleanup = nullptr) {
  std::cerr << "[ERROR]: " << message << std::endl;
  if (cleanup != nullptr) {
    cleanup();
  }
  std::exit(EXIT_FAILURE);
}

// Load a tensor serialized with torch.save() (pickle format) from `filename`.
// Returns an owning pointer to a CPU tensor; the caller releases it.
// Bug fix: the original threw std::runtime_error out of an extern "C"
// function, which is undefined behavior at the C boundary.  All failures
// are now reported through ctorch_error, consistent with the other entry
// points in this file.
extern "C"
torch_tensor_t
skala_tensor_load(const char* filename)
{
  std::ifstream input(std::string(filename), std::ios::binary);
  if (!input.is_open())
  {
    ctorch_error("Failed to open feature file: " + std::string(filename));
  }
  std::vector<char> bytes(
      (std::istreambuf_iterator<char>(input)),
      (std::istreambuf_iterator<char>()));
  input.close();

  try {
    auto tensor = torch::jit::pickle_load(bytes).toTensor().to(torch::Device(torch::kCPU));
    return new torch::Tensor(std::move(tensor));
  } catch (const std::exception &e) {
    ctorch_error(e.what());
  }
  return nullptr; // unreachable: ctorch_error does not return
}

// Reduce all elements of `tensor` to a scalar sum.
// Returns a newly allocated tensor owned by the caller.
extern "C"
torch_tensor_t
skala_tensor_sum(const torch_tensor_t tensor)
{
  auto *input = reinterpret_cast<torch::Tensor *>(tensor);
  return new torch::Tensor(input->sum());
}

extern "C"
double
skala_tensor_item_double(const torch_tensor_t tensor)
{
  auto t = reinterpret_cast<torch::Tensor *>(tensor);
  return t->item<double>();
}

// Load a TorchScript Skala model from `filename` onto the CPU and return
// an owning pointer to the module.  `requires_grad` toggles autograd mode
// while loading.  If `features` is non-null it receives the integer ids of
// the features declared in the model's "features" extra file; at most 6
// entries (one per SkalaFeature) are written and remaining slots are left
// untouched — assumes the caller provides an array of at least 6 elements;
// TODO confirm against the Fortran caller.
// Terminates the process via ctorch_error on load failure or on an
// unsupported protocol version.
extern "C"
torch_jit_script_module_t
skala_model_load(const char *filename,
                 const bool requires_grad,
                 SkalaFeature* features) {
  torch::AutoGradMode enable_grad(requires_grad);
  // Request the two metadata blobs stored alongside the TorchScript model.
  torch::jit::ExtraFilesMap extra_files{{"features", ""}, {"protocol_version", ""}};
  torch::jit::script::Module *module = nullptr;
  try {
    module = new torch::jit::script::Module;
    *module =
        torch::jit::load(filename, torch::Device(torch::kCPU), extra_files);
  } catch (const std::exception &e) {
    ctorch_error(e.what(), [&]() { delete module; });
  }

  // Only protocol version 2 of the Skala model format is supported.
  if (std::stoi(extra_files.at("protocol_version")) != 2) {
    std::string message = "Unsupported protocol version " + extra_files.at("protocol_version");
    ctorch_error(message, [&]() { delete module; });
  }

  auto feature_str = extra_files.at("features");
  // formatted as ["feature1", "feature2", ...] parse without using a full json library
  std::unordered_map<std::string, SkalaFeature> feature_keys;
  size_t pos = 0;
  while ((pos = feature_str.find('"', pos)) != std::string::npos) {
    size_t end_pos = feature_str.find('"', pos + 1);
    if (end_pos == std::string::npos) break;
    auto feature_key = feature_str.substr(pos + 1, end_pos - pos - 1);
    if (feature_key == "density") {
      feature_keys.insert({feature_key, Feature_Density});
    } else if (feature_key == "grad") {
      feature_keys.insert({feature_key, Feature_Grad});
    } else if (feature_key == "kin") {
      feature_keys.insert({feature_key, Feature_Kin});
    } else if (feature_key == "grid_coords") {
      feature_keys.insert({feature_key, Feature_GridCoords});
    } else if (feature_key == "grid_weights") {
      feature_keys.insert({feature_key, Feature_GridWeights});
    } else if (feature_key == "coarse_0_atomic_coords") {
      feature_keys.insert({feature_key, Feature_Coarse0AtomicCoords});
    }
    // Feature names not in the list above are silently ignored.
    pos = end_pos + 1;
  }

  if (features != nullptr) {
    // NOTE(review): feature_keys is an unordered_map, so the order in which
    // ids are written to `features` is unspecified — callers must not rely
    // on any particular ordering.
    size_t i = 0;
    for (const auto &[key, value] : feature_keys) {
      features[i] = value;
      ++i;
    }
  }

  return module;
}

// Invoke the model's `get_exc_density` method, passing the feature
// dictionary as the `mol` keyword argument, and return the resulting
// tensor (the exchange-correlation energy density on the grid).
// Improvement: the module is now taken by const reference — the original
// signature copied the torch::jit::script::Module by value on every call
// (torch::jit::Module::get_method is a const member, so a const ref
// suffices and existing call sites are unaffected).
static inline
at::Tensor
skala_model_forward(const torch::jit::script::Module &module, const c10::Dict<std::string, at::Tensor> &features)
{
  std::vector<c10::IValue> args;
  std::unordered_map<std::string, c10::IValue> kwargs;
  kwargs["mol"] = features;
  return module.get_method("get_exc_density")(args, kwargs).toTensor();
}

// Compute the exchange-correlation energy density on the grid for the
// features in `input`; `*output` receives a newly allocated tensor owned
// by the caller.
// NOTE(review): this path stacks each key's tensor list (adding a leading
// dimension), while skala_model_get_exc_and_vxc concatenates them along
// the existing first dimension — confirm both layouts match what
// get_exc_density expects.
extern "C"
void
skala_model_get_exc(torch_jit_script_module_t module, skala_dict_t input, torch_tensor_t* output)
{
  auto model = static_cast<torch::jit::script::Module *>(module);
  auto dict = static_cast<SkalaDict *>(input);

  // Collapse each key's list of tensors into a single stacked tensor.
  c10::Dict<std::string, at::Tensor> features;
  for (const auto& entry : *dict) {
    auto tensor = torch::stack(entry.value());
    features.insert(entry.key(), tensor);
  }

  auto exc_on_grid = skala_model_forward(*model, features);
  *output = new at::Tensor(std::move(exc_on_grid));
}

// Compute both the exchange-correlation energy density on the grid
// (`*exc_output`) and the gradients of the grid-weighted total energy
// with respect to every input tensor (`*grad_output`, grouped per feature
// key).  Both outputs are newly allocated and owned by the caller.
extern "C"
void
skala_model_get_exc_and_vxc(torch_jit_script_module_t module, skala_dict_t input, torch_tensor_t* exc_output, skala_dict_t* grad_output)
{
  auto model = static_cast<torch::jit::script::Module *>(module);
  auto dict = static_cast<SkalaDict *>(input);

  // Clone every input tensor with requires_grad enabled, remembering which
  // key each one belongs to so gradients can be regrouped afterwards.
  std::vector<at::Tensor> input_tensors;
  std::vector<std::string> tensor_keys;
  c10::Dict<std::string, at::Tensor> features_with_grad;
  for (const auto& entry : *dict) {
    std::string key = entry.key();
    const auto& values = entry.value();
    std::vector<at::Tensor> tensors;
    for (const auto &value : values) {
      auto tensor_with_grad = value.clone().requires_grad_(true);
      tensors.push_back(tensor_with_grad);
      input_tensors.push_back(tensor_with_grad);
      tensor_keys.push_back(key);
    }
    features_with_grad.insert(key, torch::concat(tensors));
  }

  // Total energy: energy density integrated with the quadrature weights.
  auto exc_on_grid = skala_model_forward(*model, features_with_grad);
  auto exc = (exc_on_grid * features_with_grad.at("grid_weights")).sum();

  auto grad_tensors = torch::autograd::grad(
      {exc},                  // outputs
      input_tensors,          // inputs
      /*grad_outputs=*/{},    // grad_outputs (defaults to ones)
      /*retain_graph=*/false, // graph is not reused after this backward pass
      /*create_graph=*/false, // no higher-order derivatives required
      /*allow_unused=*/true   // inputs the model never touches are allowed
  );

  // Regroup the flat gradient list by feature key, preserving per-key order.
  // NOTE(review): with allow_unused=true, an input the model did not use
  // yields an undefined tensor here — confirm downstream consumers handle
  // that case.
  std::unordered_map<std::string, std::vector<at::Tensor>> grad_map;
  for (size_t i = 0; i < tensor_keys.size(); ++i) {
    grad_map[tensor_keys[i]].push_back(grad_tensors[i]);
  }
  c10::Dict<std::string, std::vector<at::Tensor>> gradients;
  for (auto& [key, value] : grad_map) {
    gradients.insert(key, std::move(value));
  }

  *exc_output = new at::Tensor(std::move(exc_on_grid));
  *grad_output = new SkalaDict(std::move(gradients));
}

// Allocate an empty feature dictionary; release with skala_dict_delete().
extern "C"
skala_dict_t
skala_dict_new()
{
  return static_cast<skala_dict_t>(new SkalaDict());
}

// Store `size` tensor handles from `values` under `key` in the dictionary.
// The tensors are copied by handle (shared storage), not deep-copied.
extern "C"
void
skala_dict_insert(skala_dict_t input, const char* key, const torch_tensor_t* values, size_t size)
{
  std::vector<at::Tensor> entry;
  entry.reserve(size);
  for (size_t idx = 0; idx < size; ++idx) {
    entry.push_back(*static_cast<at::Tensor*>(values[idx]));
  }
  static_cast<SkalaDict *>(input)->insert(std::string(key), entry);
}

// Look up `key` in the dictionary and return an owning copy of its tensor
// list (release with skala_list_delete).  Terminates the process if the
// key is missing or maps to an empty list.
extern "C"
skala_list_t
skala_dict_at(skala_dict_t input, const char* key)
{
  auto *dict = static_cast<SkalaDict *>(input);
  const std::string key_str(key);

  if (!dict->contains(key_str)) {
    ctorch_error("Key '" + key_str + "' not found in SkalaDict", []() {});
  }
  auto tensors = dict->at(key_str);
  if (tensors.empty()) {
    ctorch_error("No tensors found for key '" + key_str + "'", []() {});
  }

  return new SkalaList(std::move(tensors));
}

// Number of tensors held by the list.
extern "C"
size_t
skala_list_size(skala_list_t input)
{
  return static_cast<SkalaList *>(input)->size();
}

// Return an owning copy of the tensor handle at `index` (0-based).
// Terminates the process on an out-of-bounds index.
extern "C"
torch_tensor_t
skala_list_at(skala_list_t input, size_t index)
{
  auto *list = static_cast<SkalaList *>(input);
  const size_t count = list->size();
  if (index >= count) {
    ctorch_error("Index " + std::to_string(index) +
                     " out of bounds for SkalaList of size " + std::to_string(count),
                 []() {});
  }
  return new at::Tensor(list->at(index));
}

// Element-wise product of two tensors (with broadcasting).
// Returns a newly allocated tensor owned by the caller.
extern "C"
torch_tensor_t
skala_tensor_mul(const torch_tensor_t a, const torch_tensor_t b)
{
  auto *lhs = reinterpret_cast<torch::Tensor *>(a);
  auto *rhs = reinterpret_cast<torch::Tensor *>(b);
  return new torch::Tensor((*lhs) * (*rhs));
}

// Reduce all elements of `tensor` to their scalar mean.
// Returns a newly allocated tensor owned by the caller.
extern "C"
torch_tensor_t
skala_tensor_mean(const torch_tensor_t tensor)
{
  auto *input = reinterpret_cast<torch::Tensor *>(tensor);
  return new torch::Tensor(input->mean());
}

// Return a raw pointer to the tensor's storage as contiguous float64 data.
// Side effect: the passed handle is rebound in place to a contiguous
// double-precision copy, so the returned pointer stays valid for the
// lifetime of the handle.  Callers holding other views of the original
// storage will not see this conversion.
extern "C"
void*
skala_tensor_data_ptr(const torch_tensor_t tensor)
{
  auto t = reinterpret_cast<torch::Tensor *>(tensor);
  auto contiguous = t->contiguous().to(torch::kFloat64);
  // Replace the tensor in-place so the pointer stays valid
  *t = contiguous;
  return t->data_ptr();
}

extern "C"
int64_t
skala_tensor_ndim(const torch_tensor_t tensor)
{
  auto t = reinterpret_cast<torch::Tensor *>(tensor);
  return t->ndimension();
}

extern "C"
int64_t
skala_tensor_size(const torch_tensor_t tensor, int64_t dim)
{
  auto t = reinterpret_cast<torch::Tensor *>(tensor);
  return t->size(dim);
}

extern "C"
int64_t
skala_tensor_numel(const torch_tensor_t tensor)
{
  auto t = reinterpret_cast<torch::Tensor *>(tensor);
  return t->numel();
}

// Free a dictionary created by skala_dict_new / the vxc output.
// A null handle is a no-op.
extern "C"
void
skala_dict_delete(skala_dict_t input)
{
  if (input != nullptr) {
    delete static_cast<SkalaDict *>(input);
  }
}

// Free a list returned by skala_dict_at.  A null handle is a no-op.
extern "C"
void
skala_list_delete(skala_list_t input)
{
  if (input != nullptr) {
    delete static_cast<SkalaList *>(input);
  }
}
src/skala_ftorch.f90#
!> Fortran bindings for the Skala exchange-correlation functional on top of
!> FTorch.  The heavy lifting is done by the companion C++ translation unit
!> (skala_ftorch.cxx); this module wraps those extern "C" entry points in
!> derived types with Fortran-friendly procedures.
module skala_ftorch
  use iso_c_binding, only : c_ptr, c_char, c_int, c_int64_t, c_size_t, c_bool, &
    & c_null_ptr, c_null_char, c_double, c_f_pointer
  use ftorch, only : torch_model, torch_model_delete, torch_tensor
  implicit none
  private

  public :: skala_model, skala_model_load, skala_feature, skala_tensor_load, &
    & skala_tensor_sum, skala_tensor_mean, skala_tensor_mul, skala_tensor_item_double, &
    & skala_tensor_to_array, &
    & skala_tensor_numel, skala_tensor_ndim, skala_tensor_size, &
    & skala_dict, skala_dict_new

  !> Generic mapping of a tensor onto a Fortran pointer array of rank 1-3.
  interface skala_tensor_to_array
    module procedure skala_tensor_to_array_1d
    module procedure skala_tensor_to_array_2d
    module procedure skala_tensor_to_array_3d
  end interface skala_tensor_to_array

  !> Enumerator-style constants for the feature identifiers.  The integer
  !> values must stay in sync with the SkalaFeature enum in skala_ftorch.cxx.
  type :: skala_feature_enum
    integer :: density = 1
    integer :: grad = 2
    integer :: kin = 3
    integer :: grid_coords = 4
    integer :: grid_weights = 5
    integer :: coarse_0_atomic_coords = 6
    integer :: max_feature = 6
  end type skala_feature_enum
  type(skala_feature_enum), parameter :: skala_feature = skala_feature_enum()

  !> Owning handle to a C++ dictionary mapping feature keys to tensor lists.
  !> The finalizer releases the underlying C++ object.
  type :: skala_dict
    type(c_ptr) :: p = c_null_ptr
  contains
    generic :: at => at_one, at_vec
    procedure, private :: at_one => skala_dict_at_one
    procedure, private :: at_vec => skala_dict_at_vec
    generic :: insert => insert_vec, insert_one
    procedure, private :: insert_vec => skala_dict_insert_vec
    procedure, private :: insert_one => skala_dict_insert_one
    final :: skala_dict_delete
  end type skala_dict

  !> A Skala model: an FTorch torch_model plus the list of feature ids the
  !> model declared at load time (see skala_model_load).
  type, extends(torch_model) :: skala_model
    integer(c_int), allocatable :: features(:)
  contains
    procedure :: get_exc => skala_model_get_exc
    procedure :: get_exc_and_vxc => skala_model_get_exc_and_vxc
    procedure :: needs_feature => skala_model_needs_feature
    final :: skala_model_delete
  end type skala_model

contains

  !> Load a TorchScript Skala model from `path`.  `requires_grad` (default
  !> .false.) toggles autograd mode during loading.  The C side writes the
  !> model's required feature ids into `features`; unwritten slots stay 0
  !> and are dropped by the pack() below.
  !> NOTE(review): the order of ids in model%features is unspecified (the
  !> C side iterates an unordered map) — do not rely on it.
  subroutine skala_model_load(model, path, requires_grad)
    type(skala_model), intent(out) :: model
    character(len=*), intent(in) :: path
    logical, intent(in), optional :: requires_grad

    interface
      function load_skala_model_c(path, requires_grad, features) result(model) &
        & bind(c, name="skala_model_load")
        import :: c_ptr, c_char, c_int, c_bool
        character(kind=c_char), intent(in) :: path(*)
        logical(c_bool), value :: requires_grad
        integer(c_int), intent(out) :: features(*)
        type(c_ptr) :: model
      end function load_skala_model_c
    end interface

    logical(c_bool) :: requires_grad_c
    integer(c_int) :: features(skala_feature%max_feature)

    requires_grad_c = .false.
    if (present(requires_grad)) requires_grad_c = requires_grad

    features(:) = 0
    model%p = load_skala_model_c(to_c_str(path), requires_grad_c, features)
    ! Keep only the slots the C side actually populated.
    model%features = pack(features, features > 0)
  end subroutine skala_model_load

  !> Load a pickled tensor (.pt file written by torch.save) from `path`.
  subroutine skala_tensor_load(tensor, path)
    type(torch_tensor), intent(out) :: tensor
    character(len=*), intent(in) :: path

    interface
      function skala_tensor_load_c(path) result(tensor) bind(c, name="skala_tensor_load")
        import :: c_ptr, c_char
        character(kind=c_char), intent(in) :: path(*)
        type(c_ptr) :: tensor
      end function skala_tensor_load_c
    end interface

    tensor%p = skala_tensor_load_c(to_c_str(path))
  end subroutine skala_tensor_load

  !> Sum all elements of `tensor` into the new scalar tensor `sum`.
  subroutine skala_tensor_sum(tensor, sum)
    type(torch_tensor), intent(in) :: tensor
    type(torch_tensor), intent(out) :: sum

    interface
      function skala_tensor_sum_c(tensor) result(sum) bind(c, name="skala_tensor_sum")
        import :: c_ptr, c_double
        type(c_ptr), value :: tensor
        type(c_ptr) :: sum
      end function skala_tensor_sum_c
    end interface

    sum%p = skala_tensor_sum_c(tensor%p)
  end subroutine skala_tensor_sum

  !> Extract the value of a zero-dimensional tensor as a double.
  function skala_tensor_item_double(tensor) result(value)
    type(torch_tensor), intent(in) :: tensor
    real(c_double) :: value

    interface
      function skala_tensor_item_double_c(tensor) result(value) bind(c, name="skala_tensor_item_double")
        import :: c_ptr, c_double
        type(c_ptr), value :: tensor
        real(c_double) :: value
      end function skala_tensor_item_double_c
    end interface

    value = skala_tensor_item_double_c(tensor%p)
  end function skala_tensor_item_double

  !> Element-wise product of `a` and `b` (with broadcasting) into `result`.
  subroutine skala_tensor_mul(a, b, result)
    type(torch_tensor), intent(in) :: a, b
    type(torch_tensor), intent(out) :: result

    interface
      function skala_tensor_mul_c(a, b) result(r) bind(c, name="skala_tensor_mul")
        import :: c_ptr
        type(c_ptr), value :: a, b
        type(c_ptr) :: r
      end function skala_tensor_mul_c
    end interface

    result%p = skala_tensor_mul_c(a%p, b%p)
  end subroutine skala_tensor_mul

  !> Mean of all elements of `tensor` into the new scalar tensor `mean`.
  subroutine skala_tensor_mean(tensor, mean)
    type(torch_tensor), intent(in) :: tensor
    type(torch_tensor), intent(out) :: mean

    interface
      function skala_tensor_mean_c(tensor) result(mean) bind(c, name="skala_tensor_mean")
        import :: c_ptr
        type(c_ptr), value :: tensor
        type(c_ptr) :: mean
      end function skala_tensor_mean_c
    end interface

    mean%p = skala_tensor_mean_c(tensor%p)
  end subroutine skala_tensor_mean

  !> Total number of elements in `tensor`.
  function skala_tensor_numel(tensor) result(n)
    type(torch_tensor), intent(in) :: tensor
    integer(c_int64_t) :: n

    interface
      function skala_tensor_numel_c(tensor) result(n) bind(c, name="skala_tensor_numel")
        import :: c_ptr, c_int64_t
        type(c_ptr), value :: tensor
        integer(c_int64_t) :: n
      end function skala_tensor_numel_c
    end interface

    n = skala_tensor_numel_c(tensor%p)
  end function skala_tensor_numel

  !> Number of dimensions (rank) of `tensor`.
  function skala_tensor_ndim(tensor) result(n)
    type(torch_tensor), intent(in) :: tensor
    integer(c_int64_t) :: n

    interface
      function skala_tensor_ndim_c(tensor) result(n) bind(c, name="skala_tensor_ndim")
        import :: c_ptr, c_int64_t
        type(c_ptr), value :: tensor
        integer(c_int64_t) :: n
      end function skala_tensor_ndim_c
    end interface

    n = skala_tensor_ndim_c(tensor%p)
  end function skala_tensor_ndim

  !> Extent of `tensor` along `dim`.  Note: `dim` follows the 0-based
  !> C/PyTorch convention, not Fortran's 1-based one.
  function skala_tensor_size(tensor, dim) result(n)
    type(torch_tensor), intent(in) :: tensor
    integer, intent(in) :: dim
    integer(c_int64_t) :: n

    interface
      function skala_tensor_size_c(tensor, dim) result(n) bind(c, name="skala_tensor_size")
        import :: c_ptr, c_int64_t
        type(c_ptr), value :: tensor
        integer(c_int64_t), value :: dim
        integer(c_int64_t) :: n
      end function skala_tensor_size_c
    end interface

    n = skala_tensor_size_c(tensor%p, int(dim, c_int64_t))
  end function skala_tensor_size

  !> Map a rank-1 tensor onto a Fortran pointer array.  The C side converts
  !> the tensor in place to contiguous float64 storage, so `array` aliases
  !> the tensor's buffer and stays valid only while `tensor` is alive.
  subroutine skala_tensor_to_array_1d(tensor, array)
    type(torch_tensor), intent(in) :: tensor
    real(c_double), pointer, intent(out) :: array(:)

    interface
      function skala_tensor_data_ptr_c(tensor) result(ptr) bind(c, name="skala_tensor_data_ptr")
        import :: c_ptr
        type(c_ptr), value :: tensor
        type(c_ptr) :: ptr
      end function skala_tensor_data_ptr_c
    end interface

    type(c_ptr) :: data_ptr
    integer(c_int64_t) :: n

    n = skala_tensor_numel(tensor)
    data_ptr = skala_tensor_data_ptr_c(tensor%p)
    call c_f_pointer(data_ptr, array, [n])
  end subroutine skala_tensor_to_array_1d

  !> Rank-2 variant of skala_tensor_to_array; see the rank-1 version for
  !> the aliasing/lifetime caveats.
  subroutine skala_tensor_to_array_2d(tensor, array)
    type(torch_tensor), intent(in) :: tensor
    real(c_double), pointer, intent(out) :: array(:,:)

    interface
      function skala_tensor_data_ptr_c(tensor) result(ptr) bind(c, name="skala_tensor_data_ptr")
        import :: c_ptr
        type(c_ptr), value :: tensor
        type(c_ptr) :: ptr
      end function skala_tensor_data_ptr_c
    end interface

    type(c_ptr) :: data_ptr
    integer(c_int64_t) :: nrow, ncol

    ! C is row-major, Fortran is column-major: swap dimensions
    nrow = skala_tensor_size(tensor, 1)
    ncol = skala_tensor_size(tensor, 0)
    data_ptr = skala_tensor_data_ptr_c(tensor%p)
    call c_f_pointer(data_ptr, array, [nrow, ncol])
  end subroutine skala_tensor_to_array_2d

  !> Rank-3 variant of skala_tensor_to_array; see the rank-1 version for
  !> the aliasing/lifetime caveats.
  subroutine skala_tensor_to_array_3d(tensor, array)
    type(torch_tensor), intent(in) :: tensor
    real(c_double), pointer, intent(out) :: array(:,:,:)

    interface
      function skala_tensor_data_ptr_c(tensor) result(ptr) bind(c, name="skala_tensor_data_ptr")
        import :: c_ptr
        type(c_ptr), value :: tensor
        type(c_ptr) :: ptr
      end function skala_tensor_data_ptr_c
    end interface

    type(c_ptr) :: data_ptr
    integer(c_int64_t) :: n0, n1, n2

    ! C is row-major, Fortran is column-major: reverse dimension order
    n0 = skala_tensor_size(tensor, 2)
    n1 = skala_tensor_size(tensor, 1)
    n2 = skala_tensor_size(tensor, 0)
    data_ptr = skala_tensor_data_ptr_c(tensor%p)
    call c_f_pointer(data_ptr, array, [n0, n1, n2])
  end subroutine skala_tensor_to_array_3d

  !> Compute the exchange-correlation energy density on the grid for the
  !> features in `input`; `exc` receives a new tensor owned by the caller.
  subroutine skala_model_get_exc(model, input, exc)
    class(skala_model), intent(inout) :: model
    type(skala_dict), intent(in) :: input
    type(torch_tensor), intent(out) :: exc

    interface
      subroutine skala_model_get_exc_c(model_c, input_c, output_c) bind(c, name="skala_model_get_exc")
        import :: c_ptr
        type(c_ptr), value :: model_c
        type(c_ptr), value :: input_c
        type(c_ptr), intent(out) :: output_c
      end subroutine skala_model_get_exc_c
    end interface

    call skala_model_get_exc_c(model%torch_model%p, input%p, exc%p)
  end subroutine skala_model_get_exc

  !> As skala_model_get_exc, but additionally returns the gradients of the
  !> grid-weighted energy with respect to each input feature in `vxc`
  !> (a new dictionary keyed like `input`, owned by the caller).
  subroutine skala_model_get_exc_and_vxc(model, input, exc, vxc)
    class(skala_model), intent(inout) :: model
    type(skala_dict), intent(in) :: input
    type(torch_tensor), intent(out) :: exc
    type(skala_dict), intent(out) :: vxc

    interface
      subroutine skala_model_get_exc_and_vxc_c(model_c, input_c, exc_c, vxc_c) bind(c, name="skala_model_get_exc_and_vxc")
        import :: c_ptr
        type(c_ptr), value :: model_c
        type(c_ptr), value :: input_c
        type(c_ptr), intent(out) :: exc_c
        type(c_ptr), intent(out) :: vxc_c
      end subroutine skala_model_get_exc_and_vxc_c
    end interface

    call skala_model_get_exc_and_vxc_c(model%torch_model%p, input%p, exc%p, vxc%p)
  end subroutine skala_model_get_exc_and_vxc

  !> True if the loaded model declared `feature` (a skala_feature id) as a
  !> required input.
  function skala_model_needs_feature(model, feature) result(needs)
    class(skala_model), intent(in) :: model
    integer, intent(in) :: feature
    logical :: needs

    needs = any(model%features == feature)
  end function skala_model_needs_feature

  !> Finalizer: releases the underlying FTorch model handle.
  subroutine skala_model_delete(model)
    type(skala_model), intent(inout) :: model

    call torch_model_delete(model%torch_model)
  end subroutine skala_model_delete

  !> Allocate a new, empty feature dictionary on the C++ side.
  subroutine skala_dict_new(input)
    type(skala_dict), intent(out) :: input

    interface
      function skala_dict_new_c() result(input) bind(c, name="skala_dict_new")
        import :: c_ptr
        type(c_ptr) :: input
      end function skala_dict_new_c
    end interface

    input%p = skala_dict_new_c()
  end subroutine skala_dict_new

  !> Translate a skala_feature id into its string key as used by the model
  !> (and by the C++ dictionary).  Stops on an unknown id.
  subroutine skala_feature_key(feature, key)
    integer, intent(in) :: feature
    character(kind=c_char, len=:), allocatable, intent(out) :: key
    select case(feature)
    case default
      error stop "Unknown feature"
    case (skala_feature%density)
      key = "density"
    case (skala_feature%grad)
      key = "grad"
    case (skala_feature%kin)
      key = "kin"
    case (skala_feature%grid_coords)
      key = "grid_coords"
    case (skala_feature%grid_weights)
      key = "grid_weights"
    case (skala_feature%coarse_0_atomic_coords)
      key = "coarse_0_atomic_coords"
    end select
  end subroutine skala_feature_key

  !> Convert a Fortran string into a NUL-terminated C character array.
  pure function to_c_str(str) result(c_str)
    character(len=*), intent(in) :: str
    character(kind=c_char) :: c_str(len(str)+1)

    c_str = transfer(str // c_null_char, [character(kind=c_char)::], len(str)+1)
  end function to_c_str

  !> Insert a single tensor into the dictionary under the key belonging to
  !> `feature` (a skala_feature id).
  subroutine skala_dict_insert_one(dict, feature, tensor)
    class(skala_dict), intent(inout) :: dict
    integer, intent(in) :: feature
    type(torch_tensor), intent(in) :: tensor

    interface
      subroutine skala_dict_insert_c(dict, feature, tensors, ntensors) bind(c, name="skala_dict_insert")
        import :: c_ptr, c_size_t, c_char
        type(c_ptr), value :: dict
        character(kind=c_char), intent(in) :: feature(*)
        type(c_ptr), intent(in) :: tensors(*)
        integer(c_size_t), value :: ntensors
      end subroutine skala_dict_insert_c
    end interface

    character(kind=c_char, len=:), allocatable :: key
    type(c_ptr) :: tensor_ptr(1)

    tensor_ptr(1) = tensor%p

    call skala_feature_key(feature, key)
    call skala_dict_insert_c(dict%p, to_c_str(key), tensor_ptr, 1_c_size_t)
  end subroutine skala_dict_insert_one

  !> Insert an array of tensors into the dictionary under the key belonging
  !> to `feature` (a skala_feature id).
  subroutine skala_dict_insert_vec(dict, feature, tensors)
    class(skala_dict), intent(inout) :: dict
    integer, intent(in) :: feature
    type(torch_tensor), intent(in) :: tensors(:)

    interface
      subroutine skala_dict_insert_c(dict, feature, tensors, ntensors) bind(c, name="skala_dict_insert")
        import :: c_ptr, c_size_t, c_char
        type(c_ptr), value :: dict
        character(kind=c_char), intent(in) :: feature(*)
        type(c_ptr), intent(in) :: tensors(*)
        integer(c_size_t), value :: ntensors
      end subroutine skala_dict_insert_c
    end interface

    character(kind=c_char, len=:), allocatable :: key
    type(c_ptr), allocatable :: tensor_ptrs(:)
    integer :: iptr

    ! Gather the raw C handles before crossing the language boundary.
    allocate(tensor_ptrs(size(tensors)))
    do iptr = 1, size(tensors)
      tensor_ptrs(iptr) = tensors(iptr)%p
    end do

    call skala_feature_key(feature, key)
    call skala_dict_insert_c(dict%p, to_c_str(key), tensor_ptrs, size(tensor_ptrs, kind=c_size_t))
  end subroutine skala_dict_insert_vec

  !> Fetch the first tensor stored under `feature`'s key.  The process is
  !> terminated on the C side if the key is missing or its list is empty.
  subroutine skala_dict_at_one(dict, feature, tensor)
    class(skala_dict), intent(in) :: dict
    integer, intent(in) :: feature
    type(torch_tensor), intent(out) :: tensor

    interface
      function skala_dict_at_c(dict, key) result(list) bind(c, name="skala_dict_at")
        import :: c_ptr, c_char
        type(c_ptr), value :: dict
        character(kind=c_char), intent(in) :: key(*)
        type(c_ptr) :: list
      end function skala_dict_at_c

      function skala_list_at_c(list, index) result(tensor) bind(c, name="skala_list_at")
        import :: c_ptr, c_size_t
        type(c_ptr), value :: list
        integer(c_size_t), value :: index
        type(c_ptr) :: tensor
      end function skala_list_at_c

      subroutine skala_list_delete_c(list) bind(c, name="skala_list_delete")
        import :: c_ptr
        type(c_ptr), value :: list
      end subroutine skala_list_delete_c
    end interface

    character(kind=c_char, len=:), allocatable :: key
    type(c_ptr) :: list

    call skala_feature_key(feature, key)
    list = skala_dict_at_c(dict%p, to_c_str(key))
    tensor%p = skala_list_at_c(list, 0_c_size_t)
    ! The temporary list handle is only needed to extract the tensor.
    call skala_list_delete_c(list)
  end subroutine skala_dict_at_one

  !> Fetch all tensors stored under `feature`'s key.  `ntensors` reports
  !> the total number of tensors in the dictionary entry.
  !> NOTE(review): if ntensors exceeds size(tensors) only the first
  !> size(tensors) handles are copied out — callers must check ntensors.
  subroutine skala_dict_at_vec(dict, feature, tensors, ntensors)
    class(skala_dict), intent(in) :: dict
    integer, intent(in) :: feature
    type(torch_tensor), intent(out) :: tensors(:)
    integer, intent(out) :: ntensors

    interface
      function skala_dict_at_c(dict, key) result(list) bind(c, name="skala_dict_at")
        import :: c_ptr, c_char
        type(c_ptr), value :: dict
        character(kind=c_char), intent(in) :: key(*)
        type(c_ptr) :: list
      end function skala_dict_at_c

      function skala_list_size_c(list) result(n) bind(c, name="skala_list_size")
        import :: c_ptr, c_size_t
        type(c_ptr), value :: list
        integer(c_size_t) :: n
      end function skala_list_size_c

      function skala_list_at_c(list, index) result(tensor) bind(c, name="skala_list_at")
        import :: c_ptr, c_size_t
        type(c_ptr), value :: list
        integer(c_size_t), value :: index
        type(c_ptr) :: tensor
      end function skala_list_at_c

      subroutine skala_list_delete_c(list) bind(c, name="skala_list_delete")
        import :: c_ptr
        type(c_ptr), value :: list
      end subroutine skala_list_delete_c
    end interface

    character(kind=c_char, len=:), allocatable :: key
    type(c_ptr) :: list
    integer :: i

    call skala_feature_key(feature, key)
    list = skala_dict_at_c(dict%p, to_c_str(key))
    ntensors = int(skala_list_size_c(list))
    do i = 1, min(ntensors, size(tensors))
      tensors(i)%p = skala_list_at_c(list, int(i - 1, c_size_t))
    end do
    call skala_list_delete_c(list)
  end subroutine skala_dict_at_vec

  !> Finalizer for skala_dict: releases the underlying C++ dictionary.
  !> Bug fix: the handle is reset to c_null_ptr afterwards so that a second
  !> finalization (e.g. of an intrinsic-assignment copy sharing the same
  !> pointer) becomes a harmless no-op on the C side instead of a
  !> double free.
  subroutine skala_dict_delete(dict)
    type(skala_dict), intent(inout) :: dict

    interface
      subroutine skala_dict_delete_c(dict) bind(c, name="skala_dict_delete")
        import :: c_ptr
        type(c_ptr), value :: dict
      end subroutine skala_dict_delete_c
    end interface

    call skala_dict_delete_c(dict%p)
    dict%p = c_null_ptr
  end subroutine skala_dict_delete

end module skala_ftorch

Fortran application#

Finally, we have the Fortran application itself, which demonstrates how to use the Skala bindings to compute exchange-correlation energies and potentials. We start the main program with the necessary module imports and variable declarations:

app/main.f90#
program main
  use iso_c_binding, only : c_double
  use ftorch, only : torch_tensor
  use skala_ftorch, only : skala_model, skala_model_load, skala_feature, skala_tensor_load, &
    & skala_tensor_sum, skala_tensor_mean, skala_tensor_mul, skala_tensor_item_double, &
    & skala_tensor_to_array, skala_dict, skala_dict_new

  implicit none

  type(skala_model) :: model
  type(skala_dict) :: input, vxc
  type(torch_tensor) :: exc
  type(torch_tensor) :: density, grad, kin, grid_coords, grid_weights, coarse_0_atomic_coords
  type(torch_tensor) :: dexc_ddensity, dexc_dgrad, dexc_dkin, dexc_dgrid_coords, &
    & dexc_dgrid_weights, dexc_dcoarse_0_atomic_coords, vxc_norm

  character(len=:), allocatable :: path, feature_dir

First, we obtain the command line arguments for the model path and feature directory, and check that they are provided:

app/main.f90#
  cli_input: block
  call get_argument(1, path)
  if (.not. allocated(path)) exit cli_input
  call get_argument(2, feature_dir)
  if (.not. allocated(feature_dir)) exit cli_input
  end block cli_input
  if (.not. allocated(path) .or. .not.allocated(feature_dir)) then
    call get_argument(0, path)
    print '(a)', "Usage: "//path//" <model_path> <feature_dir>"
    stop 1
  end if

We define a small contained helper procedure to read the command line arguments:

app/main.f90#
contains
  !> Retrieve command line argument `idx` into the allocatable string `arg`.
  !> On any failure `arg` is left unallocated, which callers use as the
  !> "argument missing" signal.
  subroutine get_argument(idx, arg)
    integer, intent(in) :: idx
    character(len=:), allocatable, intent(out) :: arg

    integer :: length, stat

    ! First query only the length so the buffer can be sized exactly.
    call get_command_argument(idx, length=length, status=stat)
    if (stat /= 0) return

    allocate(character(len=length) :: arg, stat=stat)
    if (stat /= 0) return

    if (length > 0) then
      call get_command_argument(idx, arg, status=stat)
      if (stat /= 0) then
        deallocate(arg)
        return
      end if
    end if
  end subroutine get_argument
end program main

The main types provided by the Skala bindings are the skala_model type, which extends the torch_model provided by FTorch and has a custom skala_model_load procedure for loading the Skala model and its metadata.

app/main.f90#
  ! Load the model
  print '(a)', "[1] Loading model from "//path
  call skala_model_load(model, path)

The input to the Skala model is passed via a dictionary of tensors, which we prepare by loading the necessary features from disk and converting them to the appropriate format.

app/main.f90#
  print '(a)', "[2] Loading features from "//feature_dir
  get_features: block
  integer :: ift
  do ift = 1, size(model%features)
    select case(model%features(ift))
    case(skala_feature%density)
      print '(a)', " -> Loading density"
      call skala_tensor_load(density, feature_dir//"/density.pt")
    case(skala_feature%grad)
      print '(a)', " -> Loading grad"
      call skala_tensor_load(grad, feature_dir//"/grad.pt")
    case(skala_feature%kin)
      print '(a)', " -> Loading kin"
      call skala_tensor_load(kin, feature_dir//"/kin.pt")
    case(skala_feature%grid_coords)
      print '(a)', " -> Loading grid_coords"
      call skala_tensor_load(grid_coords, feature_dir//"/grid_coords.pt")
    case(skala_feature%grid_weights)
      print '(a)', " -> Loading grid_weights"
      call skala_tensor_load(grid_weights, feature_dir//"/grid_weights.pt")
    case(skala_feature%coarse_0_atomic_coords)
      print '(a)', " -> Loading coarse_0_atomic_coords"
      call skala_tensor_load(coarse_0_atomic_coords, feature_dir//"/coarse_0_atomic_coords.pt")
    end select
  end do
  end block get_features

Note

To export the features from Python, you can use the provided prepare_inputs.py script.

prepare_inputs.py#
#!/usr/bin/env python3

import argparse
from pathlib import Path

import torch
from pyscf import dft, gto
from pyscf.dft import gen_grid

from skala.functional.traditional import LDA
from skala.pyscf.features import generate_features


def main() -> None:
    """Generate example Skala input features for an H2 molecule and save them as .pt files."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--output-dir",
        default=".",
        type=Path,
        help="Output directory for generated feature files.",
    )
    opts = parser.parse_args()

    opts.output_dir.mkdir(parents=True, exist_ok=True)

    # H2 with a 1 Bohr/Angstrom separation (PySCF default units) and a
    # quadruple-zeta basis.
    molecule = gto.Mole(
        atom="H 0 0 0; H 0 0 1",
        basis="def2-qzvp",
        verbose=0,
    ).build()

    # Create a set of meta-GGA features for this molecule.
    density_matrix = get_density_matrix(molecule)
    grid = gen_grid.Grids(molecule)
    grid.level = 3
    grid.build()
    features = generate_features(molecule, density_matrix, grid)

    # Add a feature called `coarse_0_atomic_coords` containing the atomic coordinates.
    features["coarse_0_atomic_coords"] = torch.from_numpy(molecule.atom_coords())

    # Save all features as individual .pt files.
    for name, tensor in features.items():
        torch.save(tensor, str(opts.output_dir / f"{name}.pt"))

    print(f"Saved features to {opts.output_dir}")

    # Quick reference value: the LDA exchange energy on the same features.
    lda_exc = LDA().get_exc(features)
    print(f"For reference, LDAx Exc = {lda_exc.item()}")


def get_density_matrix(mol: gto.Mole) -> torch.Tensor:
    """Computes an example density matrix for a given molecule using PySCF.

    Runs a density-fitted restricted Kohn-Sham (B3LYP5) calculation and
    returns the resulting one-particle density matrix as a torch tensor.
    """
    scf_solver = dft.RKS(mol, xc="b3lyp5").density_fit()
    scf_solver.kernel()
    return torch.from_numpy(scf_solver.make_rdm1())


# Entry point when executed as a script.
if __name__ == "__main__":
    main()

Tip

Here we load the features directly from torch .pt files, in your application you may want to create them directly from Fortran arrays as supported in FTorch.

To place the features in the correct format for Skala, we add them to the input dictionary with the appropriate keys.

app/main.f90#
  ! Build the input dictionary, inserting only the features this model
  ! actually consumes.
  print '(a)', "[3] Preparing input dictionary"
  call skala_dict_new(input)
  if (model%needs_feature(skala_feature%density)) then
    call input%insert(skala_feature%density, density)
  end if
  if (model%needs_feature(skala_feature%grad)) then
    call input%insert(skala_feature%grad, grad)
  end if
  if (model%needs_feature(skala_feature%kin)) then
    call input%insert(skala_feature%kin, kin)
  end if
  if (model%needs_feature(skala_feature%grid_coords)) then
    call input%insert(skala_feature%grid_coords, grid_coords)
  end if
  if (model%needs_feature(skala_feature%grid_weights)) then
    call input%insert(skala_feature%grid_weights, grid_weights)
  end if
  if (model%needs_feature(skala_feature%coarse_0_atomic_coords)) then
    call input%insert(skala_feature%coarse_0_atomic_coords, coarse_0_atomic_coords)
  end if

Note

The input dictionary accepts both single tensors and lists of tensors. If your program operates on multiple grids, you can pass those together to the input dictionary and Skala will handle them correctly.

! Example: batched grids. The insert procedure also accepts arrays of
! tensors, so features from multiple grids can be passed in a single call.
type(skala_dict) :: input
type(torch_tensor) :: grid_coords(3), grid_weights(3)
! Load the grid features into the arrays
call input%insert(skala_feature%grid_coords, grid_coords)
call input%insert(skala_feature%grid_weights, grid_weights)

With this we can now compute the exchange-correlation energy and potential by calling the Skala model with the prepared inputs.

app/main.f90#
  ! Request exc and vxc from the model
  ! get_exc_and_vxc runs inference on the prepared input dictionary; exc is
  ! the per-grid-point XC energy density and vxc is a dictionary of potential
  ! terms stored under the same keys as the input features.
  print '(a)', "[4] Running model inference"
  call model%get_exc_and_vxc(input, exc, vxc)

To get the exchange-correlation energy, we need to weight the exc values by the grid weights and sum over the grid points, which we can do using the provided tensor operations.

app/main.f90#
  ! Contract the per-point XC energy density with the quadrature weights and
  ! reduce to a scalar to obtain E_xc.
  print '(a)', "[5] Computing XC energy = sum(exc * grid_weights)"
  compute_exc: block
  type(torch_tensor) :: exc_times_w, exc_total
  call skala_tensor_mul(exc, grid_weights, exc_times_w)
  call skala_tensor_sum(exc_times_w, exc_total)
  print '(a, es22.14)', " -> E_xc = ", skala_tensor_item_double(exc_total)
  end block compute_exc

Note

The used FTorch version 1.0.0 does not yet support the torch_tensor_sum operation, therefore we provide an implementation ourselves. Future versions of FTorch might cover more features which are provided by the FTorch extensions in the src/ directory, so be sure to check the documentation for the latest updates.

Now we can build our application using CMake:

cmake -B build -S . -GNinja
cmake --build build

To evaluate Skala, we download the model checkpoint from HuggingFace using the hf CLI from the huggingface_hub package:

hf download microsoft/skala skala-1.0.fun --local-dir .

Note

To create the features directory run the prepare_inputs.py script from the examples/cpp/cpp_integration/ directory. This will generate the necessary input features for the H2 molecule with the def2-QZVP basis set.

python prepare_inputs.py --output-dir features

The script needs the skala package installed in your Python environment, which can be done via pip:

pip install skala

And run the application, passing the path to the Skala model and the feature directory as command line arguments.

./build/Skala skala-1.0.fun features

The output for the H2 molecule with the def2-QZVP basis set should look like this:

[1] Loading model from skala-1.0.fun
[2] Loading features from features
 -> Loading coarse_0_atomic_coords
 -> Loading grad
 -> Loading grid_coords
 -> Loading kin
 -> Loading grid_weights
 -> Loading density
[3] Preparing input dictionary
[4] Running model inference
[5] Computing XC energy = sum(exc * grid_weights)
 -> E_xc =  -6.23580775902096E-01

The get_exc_and_vxc procedure computes the exchange-correlation energy and potential, which we can then access from the returned dictionary. The potential terms are stored under the same keys as the input features and can be extracted as tensors.

app/main.f90#
  print '(a)', "[6] Extracting vxc components"
  ! vxc stores one gradient tensor per input feature, under the same key as
  ! the corresponding feature; only query entries the model actually used.
  if (model%needs_feature(skala_feature%density)) &
    call vxc%at(skala_feature%density, dexc_ddensity)
  if (model%needs_feature(skala_feature%grad)) &
    call vxc%at(skala_feature%grad, dexc_dgrad)
  if (model%needs_feature(skala_feature%kin)) &
    call vxc%at(skala_feature%kin, dexc_dkin)
  if (model%needs_feature(skala_feature%grid_coords)) &
    call vxc%at(skala_feature%grid_coords, dexc_dgrid_coords)
  if (model%needs_feature(skala_feature%grid_weights)) &
    call vxc%at(skala_feature%grid_weights, dexc_dgrid_weights)
  if (model%needs_feature(skala_feature%coarse_0_atomic_coords)) &
    call vxc%at(skala_feature%coarse_0_atomic_coords, dexc_dcoarse_0_atomic_coords)

We can use those tensors for further processing in our application, for example to compute the mean of each potential component

app/main.f90#
  ! Print mean of each gradient component
  ! Reduce each vxc tensor to a scalar mean as a quick sanity check; the
  ! same grad_mean temporary is reused for every component.
  print '(a)', "[7] Gradient means (dexc/dx)"
  print_gradients: block
  type(torch_tensor) :: grad_mean
  if (model%needs_feature(skala_feature%density)) then
    call skala_tensor_mean(dexc_ddensity, grad_mean)
    print '(a, es22.14)', " -> mean(dexc/d_density)                = ", &
      skala_tensor_item_double(grad_mean)
  end if
  if (model%needs_feature(skala_feature%grad)) then
    call skala_tensor_mean(dexc_dgrad, grad_mean)
    print '(a, es22.14)', " -> mean(dexc/d_grad)                   = ", &
      skala_tensor_item_double(grad_mean)
  end if
  if (model%needs_feature(skala_feature%kin)) then
    call skala_tensor_mean(dexc_dkin, grad_mean)
    print '(a, es22.14)', " -> mean(dexc/d_kin)                    = ", &
      skala_tensor_item_double(grad_mean)
  end if
  if (model%needs_feature(skala_feature%grid_coords)) then
    call skala_tensor_mean(dexc_dgrid_coords, grad_mean)
    print '(a, es22.14)', " -> mean(dexc/d_grid_coords)            = ", &
      skala_tensor_item_double(grad_mean)
  end if
  if (model%needs_feature(skala_feature%grid_weights)) then
    call skala_tensor_mean(dexc_dgrid_weights, grad_mean)
    print '(a, es22.14)', " -> mean(dexc/d_grid_weights)           = ", &
      skala_tensor_item_double(grad_mean)
  end if
  if (model%needs_feature(skala_feature%coarse_0_atomic_coords)) then
    call skala_tensor_mean(dexc_dcoarse_0_atomic_coords, grad_mean)
    print '(a, es22.14)', " -> mean(dexc/d_coarse_0_atomic_coords) = ", &
      skala_tensor_item_double(grad_mean)
  end if
  end block print_gradients

Or by converting them to Fortran arrays and using the built-in array operations.

app/main.f90#
  ! Demonstrate direct Fortran array access to tensor data
  print '(a)', "[8] Accessing tensor data as Fortran arrays"
  array_access: block
  ! NOTE(review): these pointers presumably alias the tensors' storage rather
  ! than copy it -- confirm lifetime rules in the FTorch documentation before
  ! using them past the owning tensor's lifetime.
  real(c_double), pointer :: arr1d(:), arr2d(:,:), arr3d(:,:,:)

  ! exc is 1-D (npts)
  call skala_tensor_to_array(exc, arr1d)
  print '(a, i0, a)', " -> exc: shape = (", size(arr1d), ")"
  print '(a, 3es22.14, a)', &
    "      [", arr1d(:3), " ...]"

  ! density is 2-D (nspin, npts) on the torch side; the Fortran view is
  ! transposed, so the point index runs along dimension 1 here (the sample
  ! output shows shape (19616, 2)).
  if (model%needs_feature(skala_feature%density)) then
    call skala_tensor_to_array(dexc_ddensity, arr2d)
    print '(a, i0, a, i0, a)', " -> dexc/d_density: shape = (", &
      size(arr2d, 1), ", ", size(arr2d, 2), ")"
    print '(a, 3es22.14, a)', &
      "     [[", arr2d(:3, 1), " ...]", &
      "      [", arr2d(:3, 2), " ...]]"
  end if

  ! grad is 3-D (nspin, 3, npts) on the torch side; the Fortran view is
  ! (npts, 3, nspin).
  if (model%needs_feature(skala_feature%grad)) then
    call skala_tensor_to_array(dexc_dgrad, arr3d)
    print '(a, i0, a, i0, a, i0, a)', " -> dexc/d_grad: shape = (", &
      size(arr3d, 1), ", ", size(arr3d, 2), ", ", size(arr3d, 3), ")"
    print '(a, 3es22.14, a)', &
      "    [[[", arr3d(:3, 1, 1), " ...]", &
      "      [", arr3d(:3, 2, 1), " ...]]", &
      "     [[  ...                    ]]]"
  end if

  ! kin is 2-D (nspin, npts) on the torch side; Fortran view is (npts, nspin)
  if (model%needs_feature(skala_feature%kin)) then
    call skala_tensor_to_array(dexc_dkin, arr2d)
    print '(a, i0, a, i0, a)', " -> dexc/d_kin: shape = (", &
      size(arr2d, 1), ", ", size(arr2d, 2), ")"
    print '(a, 3es22.14, a)', &
      "     [[", arr2d(:3, 1), " ...]", &
      "      [", arr2d(:3, 2), " ...]]"
  end if

  ! grid_coords is 2-D (npts, 3) on the torch side; Fortran view is (3, npts)
  if (model%needs_feature(skala_feature%grid_coords)) then
    call skala_tensor_to_array(dexc_dgrid_coords, arr2d)
    print '(a, i0, a, i0, a)', " -> dexc/d_grid_coords: shape = (", &
      size(arr2d, 1), ", ", size(arr2d, 2), ")"
    print '(a, 3es22.14, a)', &
      "     [[", arr2d(:3, 1), " ...]", &
      "      [", arr2d(:3, 2), " ...]]"
  end if

  ! grid_weights is 1-D (npts)
  if (model%needs_feature(skala_feature%grid_weights)) then
    call skala_tensor_to_array(dexc_dgrid_weights, arr1d)
    print '(a, i0, a)', " -> dexc/d_grid_weights: shape = (", size(arr1d), ")"
    print '(a, 3es22.14, a)', &
      "      [", arr1d(:3), " ...]"
  end if

  ! coarse_0_atomic_coords is 2-D (natoms, 3) on the torch side; Fortran
  ! view is (3, natoms)
  if (model%needs_feature(skala_feature%coarse_0_atomic_coords)) then
    call skala_tensor_to_array(dexc_dcoarse_0_atomic_coords, arr2d)
    print '(a, i0, a, i0, a)', " -> dexc/d_coarse_0_atomic_coords: shape = (", &
      size(arr2d, 1), ", ", size(arr2d, 2), ")"
    print '(a, 3es22.14, a)', &
      "     [[", arr2d(:3, 1), "]", "      [  ...]]"
  end if
  end block array_access

We rebuild the application to include the latest changes:

cmake --build build

Finally, we can run the application again to see the updated output with the computed potential values.

./build/Skala skala-1.0.fun features

In the output we can see the computed exchange-correlation energy as well as the mean values of the potential components, and the raw tensor data for each component.

[1] Loading model from skala-1.0.fun
[2] Loading features from features
 -> Loading coarse_0_atomic_coords
 -> Loading grad
 -> Loading grid_coords
 -> Loading kin
 -> Loading grid_weights
 -> Loading density
[3] Preparing input dictionary
[4] Running model inference
[5] Computing XC energy = sum(exc * grid_weights)
 -> E_xc =  -6.23580775902096E-01
[6] Extracting vxc components
[7] Gradient means (dexc/dx)
 -> mean(dexc/d_density)                =  -5.22209689934698E-03
 -> mean(dexc/d_grad)                   =  -5.63822012469897E-12
 -> mean(dexc/d_kin)                    =  -5.74746008322732E-04
 -> mean(dexc/d_grid_coords)            =  -2.86873728034485E-14
 -> mean(dexc/d_grid_weights)           =  -3.47603697347615E-02
 -> mean(dexc/d_coarse_0_atomic_coords) =   2.81365752618854E-10
[8] Accessing tensor data as Fortran arrays
 -> exc: shape = (19616)
      [ -1.40859407629398E-11 -5.02191290297704E-14 -4.82271929094491E-19 ...]
 -> dexc/d_density: shape = (19616, 2)
     [[ -1.41238001293684E-02 -7.07236361510544E-03 -1.22373184625014E-03 ...]
      [ -1.41238001293684E-02 -7.07236361510544E-03 -1.22373184625014E-03 ...]]
 -> dexc/d_grad: shape = (19616, 3, 2)
    [[[  2.54671362468241E-15  3.18318623003652E-19  2.04989634182288E-27 ...]
      [  2.54671362468241E-15  3.18318623003652E-19  2.04989634182288E-27 ...]]
     [[  ...                    ]]]
 -> dexc/d_kin: shape = (19616, 2)
     [[ -2.80759310169539E-07 -2.05221067626510E-09 -6.12585093687525E-14 ...]
      [ -2.80759310169539E-07 -2.05221067626510E-09 -6.12585093687525E-14 ...]]
 -> dexc/d_grid_coords: shape = (3, 19616)
     [[  0.00000000000000E+00  0.00000000000000E+00  0.00000000000000E+00 ...]
      [  0.00000000000000E+00  0.00000000000000E+00  0.00000000000000E+00 ...]]
 -> dexc/d_grid_weights: shape = (19616)
      [ -1.40859407629398E-11 -5.02191290297704E-14 -4.82271929094491E-19 ...]
 -> dexc/d_coarse_0_atomic_coords: shape = (3, 2)
     [[ -4.21657770765388E-10  1.95481841380307E-10 -4.14780685154337E-03]
      [  ...]]

Summary#

In this example, we have demonstrated how to use Skala in Fortran with the FTorch library. We have shown how to set up the development environment, build the application using CMake, and run the application to compute exchange-correlation energies and potentials using a Skala model. This example serves as a starting point for integrating Skala into your Fortran applications using FTorch, and can be extended to include more complex features and functionalities as needed.