# file      : data/buildfile
# license   : Unicode License; see accompanying LICENSE file

# Detect host endianness.
#
# Note that this buildfile is also included by ../tools/buildfile and we
# include that buildfile via the below tool importation. Thus there is an
# include cycle. It is harmless provided we set data_basename and data_name
# before the imports.
#
using autoconf

# Generate the endian header from its .in template (via the autoconf module
# enabled above). It carries the ICU_DATA_CHAR macro used below to detect the
# host endianness.
#
h{endian}: in{endian}

# Extract the ICU_DATA_CHAR macro value from h{endian} into buildfile{endian}
# as the icu_data_char buildfile variable using the c.predefs rule.
#
[rule_hint=c.predefs] buildfile{endian}: h{endian}
{
  c.predefs.poptions = false
  c.predefs.macros = ICU_DATA_CHAR@icu_data_char
}
% update
if ($c.id == 'msvc'       ? ($c.version.major < 19 ||      \
                             ($c.version.major == 19 &&    \
                              $c.version.minor < 20))    : \
    $c.id.type == 'clang' ? $c.version.major < 12        : \
    false)
{{
  # Fallback recipe for compilers that are too old for the c.predefs rule
  # (MSVC before 19.20, Clang before 12): just assume little-endian.
  #
  diag c-predefs $< -> $>

  cat <<EOF >$path($>)
    icu_data_char = 'l'
    EOF
}}

./: buildfile{endian} # Make sure it gets cleaned.

# Only generate and load buildfile{endian} when actually building. For other
# meta-operations fall back to assuming little-endian.
#
if ($build.meta_operation == 'perform')
{
  update buildfile{endian}
  source $path(buildfile{endian})
}
else
  icu_data_char = 'l' # l for little-endian, b for big-endian.

assert ($icu_data_char == 'l' || $icu_data_char == 'b')

# These variables correspond to the upstream ICUDATA_BASENAME_VERSION and
# ICUDATA_PLATFORM_NAME respectively.
#
# Note that data_basename and data_name must be set before the tool
# importations below (see the include cycle note at the top of this file).
#
data_basename = "icudt$(version.major)"        # icudt74
data_name     = "$data_basename$icu_data_char" # icudt74l | icudt74b

# Alternative versions of the `icu_data_char` and `data_name` variables for
# the non-native endianness.
#
alt_icu_data_char = ($icu_data_char == 'l' ? 'b' : 'l')
alt_data_name = "$data_basename$alt_icu_data_char" # icudt74b | icudt74l

import! [metadata] genbrk = exe{genbrk}
import! [metadata] gencfu = exe{gencfu}
import! [metadata] gencnval = exe{gencnval}
import! [metadata] gendict = exe{gendict}
import! [metadata] genrb = exe{genrb}
import! [metadata] gensprep = exe{gensprep}
import! [metadata] icupkg = exe{icupkg}
import! [metadata] makeconv = exe{makeconv}
import! [metadata] pkgdata = exe{pkgdata}

# The data source files are processed into binary form by various ICU tools
# and then packed into the ICU data archive (e.g., icudt74l.dat) by the
# pkgdata tool.
#
# The ICU default data is provided to the ICU tools in the form of the data
# archive, whereas it is provided to the ICU libraries in the form of the
# icudata library (produced in the libicuuc package). See the root README-DEV
# for the reason we diverge from upstream in this way.
#
# The path to the data archive's directory is compiled into the tools as the
# ICU data directory via the U_ICU_DATA_DEFAULT_DIR macro in
# icu-bootstrap/buildfile.
#
# Note that ICU allows packaged data items (those in the data archive) to be
# overridden by individual/standalone files. As a result, unless care is
# taken, the presence of the intermediate build output could prevent the tools
# from searching the data archive. This would obviously be a mistake once the
# data archive has been built, but during the data archive build the tools do
# actually need to read the individual data files in order to satisfy internal
# data dependencies. Thus we need different build-time and run-time ICU data
# directories.
#
# We also have to make the $out_base/ directory tree mirror that of $src_base/
# so that the myriad data subdirectories are created automatically. However
# the tools expect individual data files to be inside <ICU_DATA_DIR>/icudtXXX/
# (see `data_name` in root.build for details). Therefore set the build-time
# ICU data directory to $out_base/$btdd/ using the --icudatadir tools option
# and create $btdd/icudtXXX as a symlink to $out_base. The $btdd/ indirection
# achieves the separation of the build-time and run-time ICU data paths:
# without it the tools will read <ICU_DATA_DIR>/icudtXXX/foo.bar before
# searching for foo.bar in <ICU_DATA_DIR>/icudtXXX.dat.
#
# We also support the creation of a data archive for the non-native
# (alternative) endianness. This version would be required if libicuuc (which
# builds the ICU data library from the data archive) is being built for a
# target with a different endianness than that of the host.

# The path to the build-time data directory.
#
btdd = [dir_path] build-time-data-dir

# The ICU data archive.
#
# Note that the data file targets and file{icudata.lst} are added as
# prerequisites below, just before the ad hoc recipe that generates
# file{icudata.lst}.
#
# Also note that ICU initialization, normally performed by the ICU tools,
# relies on the presence of a valid cnvalias.icu file and fails if that's not
# the case. The ICU tools try to ignore this failure to be usable during the
# bootstrap phase of the build (see the u_init() call in the upstream's
# icu-tools/tools/genrb/genrb.cpp file for an example). For cases where that's
# not possible (incomplete cnvalias.icu may be present, etc), the upstream's
# build system declares cnvalias.icu as a prerequisite for the targets being
# generated (see upstream's icu-tools/data/BUILDRULES.py file for details).
# However, it turns out that not all such targets are properly covered in ICU
# 74.2. For example, genrb fails with the `can not initialize ICU` error while
# generating a rbnf/res{*} target in the presence of an incomplete
# cnvalias.icu file. The backported `Avoid segfault due to incomplete
# cnvalias.icu` commit declares cnvalias.icu as a prerequisite for the vast
# majority (if not all) genrb-generated targets in BUILDRULES.py. We used to
# specify the file{cnvalias.icu} target as a prerequisite as precisely as the
# upstream's build system does. However, to be on the safe side, let's just
# always specify this prerequisite for all the targets generated by tools for
# which the mentioned failure has been observed. Specifically, for the genrb,
# pkgdata, and genbrk utilities. This way the set of cnvalias.icu-dependent
# targets will be a superset of the one in the upstream's build system.
#
./: file{$(data_name).dat}: fsdir{$btdd/$data_name} $pkgdata file{cnvalias.icu}
{{
  # --mode common: Produce the data archive (.dat) only.
  #
  # The icudata.lst file (generated by an ad hoc recipe below) lists the data
  # files to pack into the archive.
  #
  $pkgdata --quiet --copyright          \
           --mode common                \
           --name      $data_name       \
           --sourcedir $out_base        \
           --destdir   $out_base        \
           --tempdir   $out_base        \
           $path(file{icudata.lst})
}}

# The alternative ICU data archive with the non-native endianness.
#
# Convert the native endianness archive instead of rebuilding.
#
./: file{$(alt_data_name).dat}: file{$(data_name).dat} $icupkg
{{
  # icupkg --type converts the archive to the specified endianness (l|b).
  #
  $icupkg --type $alt_icu_data_char $path($<[0]) $path($>[0])
}}

# Create the build-time ICU data directory path (see above).
#
# The $btdd/<data_name> entry is a symlink back to $out_base so that the
# tools can find the individual data files at <ICU_DATA_DIR>/icudtXXX/ (see
# the build-time/run-time ICU data directory discussion above for details).
#
fsdir{$btdd/$data_name}: fsdir{$btdd/}
% update
{{
  ln -s $out_base/ $out_base/$btdd/$data_name
}}
% clean
{{
  rm $out_base/$btdd/$data_name/
}}

# Binary data file target types.
#
# See
# https://unicode-org.github.io/icu/userguide/icu_data/#icu-data-file-formats
# and upstream/icu4c/source/data/BUILDRULES.py for more information.
#
# Each `define` below derives a new target type from file{} and fixes its
# default extension.
#

# ICU Data filter file (currently only used for resource bundles in
# collation/).
#
define flt: file
flt{*}: extension = txt

# Used for various types/formats of ICU data files.
#
define icu: file
icu{*}: extension = icu

# Normalization data.
#
define nrm: file
nrm{*}: extension = nrm

# Dictionary-based break iterator data.
#
define dict: file
dict{*}: extension = dict

# A conversion mapping table.
#
define cnv: file
cnv{*}: extension = cnv

# Rule-based break iterator data.
#
define brk: file
brk{*}: extension = brk

# Stringprep profile data.
#
define spp: file
spp{*}: extension = spp

# Generate conversion (charset) aliases.
#
# The gencnval tool compiles mappings/convrtrs.txt (the first prerequisite)
# into cnvalias.icu.
#
file{cnvalias.icu}: mappings/file{convrtrs.txt} $gencnval
{{
  # Work around a Windows-specific problem in which gencnval fails to open the
  # output file for writing if it already exists due to it being locked for
  # some mysterious reason (antivirus?). The output file will already exist in
  # case of update followed by install because the tools will have to be
  # rebuilt with the install-specific ICU data directory and thus the data
  # will be generated for a second time.
  #
  rm -f $path($>[0])

  # Note that the output file is always called cnvalias.icu.
  #
  $gencnval --sourcedir $directory($path($<[0]))        \
            --destdir   $directory($path($>[0]))        \
            $leaf($path($<[0]))
}}

# Add cnvalias.icu to icudata.lst's list of targets (which will be further
# updated like this throughout the rest of the file). Note: See also
# `file{icudata.lst}: res_index_targets` below.
#
file{icudata.lst}: targets += file{cnvalias.icu}

# Dependencies for the res_index.res targets that are required for the
# following directories.
#
res_index_dirs = brkitr         \
                 coll           \
                 curr           \
                 lang           \
                 locales        \
                 rbnf           \
                 region         \
                 unit           \
                 zone

for d: $res_index_dirs
{
  # Source and output subdirectories.
  #
  # Outputs go into the corresponding output subdirectory except for locales/
  # in which case they go directly into `./`.
  #
  sd = [dir_path] $d
  od = [dir_path] ($d != locales ? $d : .)

  t = $od/res{res_index}      # Target
  s = $od/file{res_index.txt} # Source file (also generated)

  # Dependencies for res_index.res and its dependency res_index.txt.
  #
  # Note that res_index.txt is itself generated from LOCALE_DEPS.json by the
  # ad hoc pattern rule recipe below.
  #
  $t: $s $genrb
  $s: $sd/file{LOCALE_DEPS.json}

  # Keep the res_index targets separate from `file{icudata.lst}: targets`
  # because they are handled differently in the icudata.lst ad hoc recipe.
  #
  file{icudata.lst}: res_index_targets += $t
}

# Declare the compilation dependencies for the rbnf/ resource bundles and
# register them as data archive contents.
#
for src: rbnf/file{*.txt -res_index.txt} # res_index is excluded for in-source builds.
{
  trg = rbnf/res{$name($src)}

  $trg: $src $genrb

  file{icudata.lst}: targets += $trg
}

# Dependencies for coll/ucadata.icu, generated from
# in/coll/ucadata-{unihan,implicithan}.icu. The upstream default is unihan.
#
coll/icu{ucadata}: in/coll/file{ucadata-unihan.icu} $icupkg

file{icudata.lst}: targets += coll/icu{ucadata}

# Mark the remaining ucadata variants as excluded prerequisites.
#
# NOTE(review): the ucadata-unihan-* pattern (with the trailing dash) appears
# to match nothing given the above file name is ucadata-unihan.icu -- confirm
# whether this is intentional.
#
coll/icu{ucadata}: in/coll/file{ucadata-unihan-*}       \
                   in/coll/file{ucadata-implicithan*}: include = false

# Declare the compilation dependencies for the misc/ resource bundles,
# collecting the targets (also needed by the collation bundles below) and
# registering them as data archive contents.
#
misc_res_trgs =
for src: misc/file{*.txt}
{
  trg = res{$name($src)}
  misc_res_trgs += $trg

  $trg: $src $genrb file{cnvalias.icu}

  file{icudata.lst}: targets += $trg
}

# The filters/coll_tree/ output directory does not exist in the source tree
# and thus has to be created explicitly (see below for details).
#
fsdir{filters/coll_tree/}: fsdir{filters/}

# Dependencies for the res{*} targets in coll/.
#
for s: coll/file{*.txt -res_index.txt} # Exclude res_index if in-source build.
{
  t = coll/res{$name($s)}

  # Besides genrb, the collation bundles depend on the UCA data, the misc/
  # bundles, and the UCA rules (presumably read during compilation, matching
  # upstream's BUILDRULES.py -- TODO confirm).
  #
  $t: $s $genrb         \
      coll/icu{ucadata} \
      $misc_res_trgs    \
      unidata/file{UCARules.txt}

  # Dependencies and genrb options for the filter files.
  #
  $t: filters/coll_tree/flt{$name($s)}
  {
    genrb_options += --filterDir $out_base/filters/coll_tree
  }

  # Add filters/coll_tree/ as prerequisite because it does not exist in the
  # source tree (otherwise it would be created automatically).
  #
  filters/coll_tree/flt{$name($s)}: fsdir{filters/coll_tree/}

  file{icudata.lst}: targets += $t
}

# Generate a collation data filter file. These filters are applied by default
# by the upstream build. See source/data/BUILDRULES.py.
#
# This pattern rule matches any flt{} target (only those declared above as
# prerequisites of the coll/res{*} targets actually get generated).
#
flt{~'/(.*)/'}:
{{
  diag gen $>

  # The echoed text is the filter file contents: keep all collations (+/)
  # except big5han and gb2312han.
  #
  echo \
"# Caution: This file is automatically generated

+/
-/collations/big5han
-/collations/gb2312han" \
  > $path($>[0])
}}

# Dependencies for the res{*} targets in the subdirectories that have a pool
# bundle.
#
for d: curr     \
       lang     \
       locales  \
       region   \
       unit     \
       zone
{
  # Source and output subdirectories.
  #
  # Outputs go into the corresponding output subdirectory except for locales/
  # in which case they go directly into `./`.
  #
  sd = [dir_path] $d
  od = [dir_path] ($d != locales ? $d : .)

  # Exclude "supplemental" resource bundles which are not included in their
  # subdirectories' pool bundles or res_index's.
  #
  # Note: the exclusion list must be reset on every iteration. Previously it
  # was only assigned in the curr and zone branches, so its value (with
  # res_index.txt appended repeatedly) leaked from one iteration into the
  # next (e.g., curr's supplementalData.txt exclusion leaked into lang,
  # locales, region, and unit).
  #
  if ($sd == curr)
    excl = supplementalData.txt
  elif ($sd == zone)
    excl = tzdbNames.txt
  else
    excl =

  excl += res_index.txt # Exclude if in-source build.
  for s: $sd/file{*.txt -{$excl}}
  {
    t = $od/res{$name($s)}

    $t: $s $genrb $od/res{pool}
    $t: genrb_options += --usePoolBundle "$out_base/$od"

    file{icudata.lst}: targets += $t
  }

  $od/res{pool}: $sd/file{*.txt -{$excl}} $genrb

  file{icudata.lst}: targets += $od/res{pool}
}

# These "supplemental" resource bundles are not included in their
# subdirectories' pool bundles and are therefore also generated without the
# --usePoolBundle option.
#
curr/res{supplementalData}: curr/file{supplementalData.txt} $genrb
zone/res{tzdbNames}:        zone/file{tzdbNames.txt}        $genrb

file{icudata.lst}: targets += curr/res{supplementalData}        \
                              zone/res{tzdbNames}

# Dependencies for rule-based transform (transliterator) data.
#
# Only the root, en, and el bundles are compiled.
#
translit/res{root}: translit/file{root.txt} $genrb
translit/res{en}:   translit/file{en.txt}   $genrb
translit/res{el}:   translit/file{el.txt}   $genrb

# The remaining transform rule files are presumably pulled in by the above
# bundles during compilation, hence they are made prerequisites of all three
# (TODO confirm against upstream).
#
translit/res{root en el}: translit/file{*.txt -{root.txt en.txt el.txt}}

file{icudata.lst}: targets += translit/res{root en el}

# Dependencies for dictionary-based break iterator data (brkiter/dict{*}).
#
# Note that the resulting target list is also used as prerequisites of the
# brkitr/res{*} targets below.
#
dict_trgs =
for s: brkitr/dictionaries/file{*.txt}
{
  t = brkitr/dict{$name($s)}

  $t: $s $gendict

  dict_trgs += $t
}

# gendict options for brkiter/dict{*}. See source/data/BUILDRULES.py.
#
# (--bytes/--uchars presumably select the trie value size and --transform
# the code point offset for the script -- see the gendict documentation.)
#
brkitr/dict{burmesedict}: gendict_options += --bytes --transform offset-0x1000
brkitr/dict{cjdict}:      gendict_options += --uchars
brkitr/dict{khmerdict}:   gendict_options += --bytes --transform offset-0x1780
brkitr/dict{laodict}:     gendict_options += --bytes --transform offset-0x0e80
brkitr/dict{thaidict}:    gendict_options += --bytes --transform offset-0x0e00

file{icudata.lst}: targets += $dict_trgs

# See source/data/BUILDRULES.py for why the other .icu files in in/ are
# excluded.
#
for src: in/file{unames.icu ulayout.icu uemoji.icu}
{
  # Compute the subdirectory of data/ that corresponds to this file's
  # location under in/.
  #
  subdir = $path.relative($directory($src), [dir_path] in)
  trg = $subdir/icu{$name($src)}

  $trg: $src $icupkg

  file{icudata.lst}: targets += $trg
}

# Dependencies for rule-based break iterator data (brkitr/rules/foo.txt ->
# foo.brk).
#
brkitr_brk_trgs =
for s: brkitr/rules/file{*.txt}
{
  t = brkitr/brk{$name($s)}

  $t: $s $genbrk

  # These dependencies are defined in generate_brkitr_brk in
  # upstream/icu4c/source/data/BUILDRULES.py. Note that the lstm and adaboost
  # dependencies are disabled by default in
  # source/python/icutools/databuilder/filtration.py.
  #
  $t: file{cnvalias.icu} icu{ulayout uemoji}

  brkitr_brk_trgs += $t
}

file{icudata.lst}: targets += $brkitr_brk_trgs

# Dependencies for the res{*} targets in brkitr/.
#
# Note that these resource bundles depend on all the dictionary and break
# iterator data generated above.
#
for s: brkitr/file{*.txt -res_index.txt} # Exclude res_index if in-source build.
{
  t = brkitr/res{$name($s)}

  $t: $s $genrb $dict_trgs $brkitr_brk_trgs

  file{icudata.lst}: targets += $t
}

# Map each mappings/*.ucm source to its compiled cnv{} counterpart and
# register it as data archive contents.
#
# Some of the .ucm names contain multiple dots (e.g., macos-7_3-10.2.ucm),
# hence the `...` default-extension syntax.
#
for src: mappings/file{*.ucm...}
{
  trg = cnv{$name($src)...}

  $trg: $src $makeconv

  file{icudata.lst}: targets += $trg
}

# Declare the endianness-conversion dependencies for the normalization data
# (in/foo.nrm -> foo.nrm) and register the results as data archive contents.
# See source/data/BUILDRULES.py for why nfc.nrm is excluded.
#
for src: in/file{*.nrm -nfc.nrm}
{
  trg = nrm{$name($src)}

  $trg: $src $icupkg

  file{icudata.lst}: targets += $trg
}

# Dependencies and ad hoc recipe for confusables data.
#
# Note that the -w gencfu option is deprecated and ignored in the code so,
# unlike upstream, we omit it and its argument confusablesWholeScript.txt.
#
file{confusables.cfu}: unidata/file{confusables.txt}    \
                       file{cnvalias.icu}               \
                       $gencfu
{{
  # -r: the confusables rule file (first prerequisite); -o: the output file.
  # The build-time ICU data directory is passed so the tool can resolve its
  # internal data dependencies (see the --icudatadir discussion above).
  #
  $gencfu --copyright --quiet           \
          --icudatadir $out_base/$btdd  \
          -r $path($<[0])               \
          -o $path($>[0])
}}

file{icudata.lst}: targets += file{confusables.cfu}

# Dependencies for stringprep profile data.
#
for s: sprep/file{*.txt}
{
  t = spp{$name($s)}

  $t: $s unidata/file{NormalizationCorrections.txt} $gensprep

  file{icudata.lst}: targets += $t
}

# Generate stringprep profile data (foo.txt -> foo.spp).
#
spp{~'/(.+)/'}: file{~'/\1.txt/'}                               \
                unidata/file{NormalizationCorrections.txt}      \
                $gensprep
{{
  # Note that unlike most of the other tools gensprep does not work with
  # absolute paths so use of --sourcedir is required.
  #
  $gensprep --bundle-name $name($<[0])                  \
            --norm-correction $src_base/unidata         \
            --unicode 3.2.0                             \
            --icudatadir $out_base/$btdd                \
            --sourcedir $directory($path($<[0]))        \
            --destdir   $directory($path($>[0]))        \
            $leaf($path($<[0]))
}}

# Insert the data targets into the data archive's prerequisites before
# icudata.lst to ensure their paths will be resolved before icudata.lst is
# generated. Although this would usually be expressed by making the data
# targets prerequisites of icudata.lst, that would cause the latter to be
# updated whenever the contents of any data target changed which is not the
# correct semantics. (Note that a modification of the `targets` or
# `res_index_targets` variables will automatically render the icudata.lst
# target out of date.)
#
# The `$(file{icudata.lst}: targets)` syntax expands the target-specific
# `targets` variable of the file{icudata.lst} target.
#
file{$(data_name).dat}: $(file{icudata.lst}: targets)           \
                        $(file{icudata.lst}: res_index_targets) \
                        file{icudata.lst}

# Generate a list of all target paths, relative to `./`. Sort alphabetically
# but res_index.res has to be at the top of every directory. E.g.:
#
# res_index.res
# af.res
# af_NA.res
# af_ZA.res
# ...
# brkitr/res_index.res
# brkitr/burmesedict.dict
# brkitr/char.brk
# ...
#
file{icudata.lst}:
{{
  diag gen $>

  o = $path($>[0]) # Output file path.

  # Note that the $targets and $res_index_dirs variables come from the
  # buildfile scope above.
  #
  # Generate a sorted list of all data targets. Make the paths relative to ./
  # and sort them.
  #
  cont = # Output file contents.

  for t: $targets
    cont += "$path.relative($path($t), $out_base)"

  cont = $name.sort($cont)

  # Write the targets to the output file, inserting the res_index.res targets
  # manually as we go along.
  #
  # Start with the data directory root's res_index.
  #
  echo "$path.relative($path(res{res_index}), $out_base)" > $o

  cd = # Current res_index directory (tracks when we enter a new one).
  for t: $cont
  {
    d = $directory($t) # Target directory (if any).

    # Insert res_index if we've entered a new res_index directory.
    #
    if (!$empty($d) && $d != $cd && $name.find($res_index_dirs, $name($d)))
    {
      cd = $d
      rip = $path($d/res{res_index}) # res_index target path.
      echo "$path.relative($rip, $out_base)" >> $o
    }

    echo $t >> $o
  }
}}

# Generate res_index.txt for a data directory. See source/data/BUILDRULES.py.
#
file{~'/res_index.txt/'}: file{~'/LOCALE_DEPS.json/'}
{{
  o  = $path($>[0])             # Output path.
  sd = $directory($path($<[0])) # Source directory path.

  # Generate a sorted listing of .txt files in the source directory to
  # initialise the list of input file names (set -n splits the find output
  # into lines).
  #
  find $sd -name \*.txt | set -n ifs # Input files
  ifs = $name.sort($name($ifs))

  # Remove the target itself in case of an in-source build.
  #
  ifs = $regex.filter_out_match($ifs, '^res_index$')

  # Remove the "supplemental" resource bundles (handled separately) from the
  # list of input files. Note that they also need to be excluded from the pool
  # bundle.
  #
  sdn = $leaf($sd) # Source directory name.
  if ($sdn == 'curr')
    ifs = $regex.filter_out_match($ifs, '^supplementalData$')
  elif ($sdn == 'zone')
    ifs = $regex.filter_out_match($ifs, '^tzdbNames$')

  # Track the input file set in the auxiliary dependency database so that
  # adding/removing locale files renders the target out of date.
  #
  depdb hash $ifs

  diag gen $>

  # Read LOCALE_DEPS.json (the first prerequisite), replace C++ comments with
  # empty lines, and then parse its contents.
  #
  cat $path($<[0]) | sed -e 's%^//.*%%' | set ldt # LOCALE_DEPS text
  ld = $json.parse($ldt) # LOCALE_DEPS parsed JSON

  if ($null($ld) || $json.value_size($ld) == 0)
    exit "failed to parse $path($<[0])"

  # Extract the CLDR (Common Locale Data Repository) version and list of
  # aliases from LOCALE_DEPS.
  #
  cldr_version = ($ld["cldrVersion"])

  if ($null($cldr_version))
    exit "cldrVersion member is missing from $path($<[0])"

  ao = ($ld["aliases"]) # Aliases object
  aliases = ($null($ao) ? [null] : $object_names($ao))

  # Generate the contents of the `InstalledLocales:table` section of the
  # output file from the list of input file names.
  #
  # Remove from the list of input file names the aliases and the following set
  # of exclusions.
  #
  # "Exclude the deprecated locale variants and root; see ICU-20628."
  #
  excl = ja_JP_TRADITIONAL     \
	 th_TH_TRADITIONAL     \
	 de_                   \
	 de__PHONEBOOK         \
	 es_                   \
	 es__TRADITIONAL       \
	 root

  for a: $aliases $excl
    ifs = $regex.filter_out_match($ifs, "^\s*$a\$")

  # Write the output file.
  #
  echo "// Warning this file is automatically generated
  res_index:table\(nofallback\) {
  CLDRVersion { \"$cldr_version\" }" > $o

  # Write the InstalledLocales:table section.
  #
  echo "  InstalledLocales:table {" >> $o
  for f: $ifs
    echo "    $f {\"\"}" >> $o
  echo "  }" >> $o

  # Write the AliasLocales:table section.
  #
  echo "  AliasLocales:table {" >> $o
  for a: $aliases
    echo "    $a {\"\"}" >> $o
  echo '  }' >> $o

  echo '}' >> $o
}}

# Generate a pool bundle (pool.res) from a set of resource bundles.
#
res{~'/pool/'}: file{~'/.*.txt/'} $genrb file{cnvalias.icu}
{{
  # Generate a sorted list of input file names from the prerequisites.
  #
  # Note: upstream also passes a sorted list and the order does affect the
  # checksum of the output file.
  #
  ifs = $name.filter_out($<, exe) # Remove exe{genrb} from the prerequisite list.
  ifs = $name.sort($name($ifs))
  ifs = $regex.apply($ifs, '(.+)', '\1.txt') # Append .txt to each name.

  # -k presumably enables strict (pedantic) parsing of the bundle syntax --
  # see the genrb documentation.
  #
  $genrb --quiet                                \
         --sourcedir  $directory($path($<[0]))  \
         --destdir    $directory($path($>[0]))  \
         --icudatadir $out_base/$btdd           \
         --writePoolBundle -k                   \
         $ifs
}}

# Generate a resource bundle (foo.txt -> foo.res).
#
res{~'/(.*)/'}: file{~'/\1.txt/'} $genrb file{cnvalias.icu}
{{
  # The target-specific $genrb_options may contain --filterDir and/or
  # --usePoolBundle (see the corresponding dependency declarations above).
  #
  $genrb --quiet                                \
         --sourcedir  $directory($path($<[0]))  \
         --destdir    $directory($path($>[0]))  \
         --icudatadir $out_base/$btdd           \
         $genrb_options -k                      \
         $leaf($path($<[0]))
}}

# Generate dictionary-based break iterator data (foo.txt -> foo.dict).
#
dict{~'/(.*)/'}: file{~'/\1.txt/'} $gendict
{{
  # The target-specific $gendict_options select the trie type and code point
  # transform (see the corresponding assignments above).
  #
  $gendict --quiet --copyright          \
           --icudatadir $out_base/$btdd \
           $gendict_options             \
           $path($<[0])                 \
           $path($>[0])
}}

# Convert an ICU data file (of various types) to the specified endianness.
#
# (icupkg --type {l|b} writes the output in the specified byte order.)
#
icu{~'/(.*)/'}: file{~'/.*.icu/'} $icupkg
{{
   $icupkg --type $icu_data_char $path($<[0]) $path($>[0])
}}

# Convert normalization data to the specified endianness.
#
nrm{~'/(.*)/'}: file{~'/.*.nrm/'} $icupkg
{{
   $icupkg --type $icu_data_char $path($<[0]) $path($>[0])
}}

# Generate a conversion mapping table (foo.ucm -> foo.cnv).
#
cnv{~'/(.+)/'}: file{~'/\1.ucm/'} $makeconv
{{
  $makeconv --copyright                         \
            --destdir $directory($path($>[0]))  \
            $path($<[0])
}}

# Compile break iteration rules: foo.txt -> foo.brk.
#
brk{~'/(.+)/'}: file{~'/\1.txt/'} $genbrk file{cnvalias.icu}
{{
  # genbrk [-options] -r rule-file -o output-file
  #
  $genbrk --quiet --copyright           \
          --icudatadir $out_base/$btdd  \
          -r $path($<[0])               \
          -o $path($>[0])
}}

# Install the data archives for the tools to find.
#
# Note that both endianness variants are installed, into the data/
# installation subdirectory, via the target-specific `install` variable.
#
file{$(data_name).dat $(alt_data_name).dat}: install = data/
