NuGraph Shower Reco

10/12

Setup:

/cvmfs/uboone.opensciencegrid.org/bin/shell_apptainer.sh
source /cvmfs/uboone.opensciencegrid.org/products/setup_uboone.sh
setup uboonecode v10_00_01 -q e26:prof
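
To verify that the environment picked up the expected release, an optional sanity check is to list the active products:

ups active | grep uboonecode
echo $UBOONECODE_DIR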

You need to set up a local uboonecode development area with the following repositories checked out on these branches:

ubreco -> feature/cerati-NuGraphMCC10
ubana -> feature/cerati-NuGraphMCC10
uboonedata -> feature/davidc_nugraph
uboonecode -> feature/cerati-NuGraphMCC10

Full setup steps:

mkdir /exp/uboone/app/users/$USER/v10_00_01
cd /exp/uboone/app/users/$USER/v10_00_01
mrb newDev
source localProducts_larsoft_v10_00_01_e26_prof/setup
cd srcs/
mrb g -t UBOONE_SUITE_v10_00_01 ubana
cd ubana
git checkout feature/cerati-NuGraphMCC10
cd $MRB_SOURCE
mrb g -t UBOONE_SUITE_v10_00_01 ubreco
cd ubreco
git checkout feature/cerati-NuGraphMCC10
cd $MRB_SOURCE
mrb g -t UBOONE_SUITE_v10_00_01 uboonecode
cd uboonecode
git checkout feature/cerati-NuGraphMCC10
cd $MRB_SOURCE
mrb g -t UBOONE_SUITE_v10_00_01 uboonedata
cd $MRB_SOURCE/uboonedata
git checkout feature/davidc_nugraph
cd $MRB_TOP/srcs
cd $MRB_BUILDDIR
mrbsetenv
mrb i -j
mrbslp
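
To come back to this development area in a fresh shell, a sketch of the usual mrb workflow, assuming the same paths as above:

/cvmfs/uboone.opensciencegrid.org/bin/shell_apptainer.sh
source /cvmfs/uboone.opensciencegrid.org/products/setup_uboone.sh
cd /exp/uboone/app/users/$USER/v10_00_01
source localProducts_larsoft_v10_00_01_e26_prof/setup
mrbslp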

After building the code, you need to make a local tarball of the build, e.g.:

make_tar_uboone.sh -d localProducts_larsoft_v10_00_01_e26_prof/ /pnfs/uboone/resilient/users/davidc/jobs/v10_00_01/local.tar
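
If the destination directory on /pnfs does not exist yet, create it first (the path here just mirrors the example above; substitute your own username):

mkdir -p /pnfs/uboone/resilient/users/$USER/jobs/v10_00_01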

To launch jobs, I use an XML configuration like the example below, submitting with the following command:

project.py --xml grid_nugraph.xml --stage nueopenntuple --submit

The contents of grid_nugraph.xml:

<?xml version="1.0"?>

<!-- Production Project -->

<!DOCTYPE project [
<!ENTITY release "v10_00_01">
<!ENTITY file_type "mcc10">
<!ENTITY run_type "physics">
<!ENTITY name "nugraph">
<!ENTITY tag "devel">
<!ENTITY date "102224">
<!ENTITY user "davidc">

<!ENTITY numu    "prodgenie_bnb_nu_uboone_overlay_mcc9.1_v08_00_00_26_filter_run1_reco2_reco2" >
<!ENTITY nue     "prodgenie_bnb_intrinsice_nue_uboone_overlay_mcc9.1_v08_00_00_26_run1_reco2_reco2" >
<!ENTITY nueRUN4B "run4b_bnb_intrinsic_nue_overlay_pandora_reco2_reco2_reco2" >
<!ENTITY dirt    "prodgenie_bnb_dirt_overlay_mcc9.1_v08_00_00_26_run1_reco2_reco2" >
<!ENTITY bnb     "data_bnb_mcc9.1_v08_00_00_25_reco2_C1_beam_good_reco2_5e19" >
<!ENTITY extC1   "data_extbnb_mcc9.1_v08_00_00_25_reco2_C1_all_reco2" >
<!ENTITY extC2   "data_extbnb_mcc9.1_v08_00_00_25_reco2_C2_all_reco2" >
<!ENTITY nueopen "prodgenie_bnb_intrinsice_nue_uboone_overlay_mcc9.1_v08_00_00_26_run1_reco2_reco2">

<!ENTITY version   "v00" >

]>

<job>
<project name="&name;">

  <!-- Project size -->
  <numevents>10000</numevents>

  <!-- Operating System -->
  <os>SL7</os>

  <!-- Batch resources -->
  <resource>DEDICATED,OPPORTUNISTIC</resource>

  <!-- Larsoft information -->
  <larsoft>
    <tag>&release;</tag>
    <qual>e26:prof</qual>
    <local>/pnfs/uboone/resilient/users/davidc/jobs/&release;/local.tar</local>
  </larsoft>

  <!-- Project stages -->

  <stage name="nueopen">
    <inputdef>&nueopen;</inputdef>
    <fcl>reco_uboone_mcc9_8_driver_overlay_stage2_nugraph.fcl</fcl>
    <fcl>run_fullshowerreco.fcl</fcl>
    <!--<fcl>run_neutrinoselectionfilter_run1_overlay_cc0pinp.fcl</fcl>-->
    <outdir>/pnfs/uboone/scratch/users/&user;/&release;/&name;/&date;/&nueopen;/&version;</outdir>
    <logdir>/pnfs/uboone/scratch/users/&user;/&release;/&name;/&date;/&nueopen;/&version;</logdir>
    <workdir>/pnfs/uboone/scratch/users/&user;/work/&release;/&name;/&date;/&nueopen;/&version;</workdir>
    <bookdir>/exp/uboone/data/users/&user;/book/&release;/&name;/&date;/&nueopen;/&version;</bookdir>
    <datatier>reconstructed</datatier>
    <defname>&name;_&tag;_reco</defname>
    <check>0</check>
    <memory>4000</memory>
    <disk>20GB</disk>
    <numjobs>2000</numjobs>
    <maxfilesperjob>1</maxfilesperjob>
    <schema>gsiftp</schema> <!-- Herb's work-around -->
    <jobsub>--expected-lifetime=24h --append_condor_requirements='(TARGET.HAS_CVMFS_uboone_opensciencegrid_org==true)' -e XRD_CONNECTIONRETRY=32 -e XRD_REDIRECTLIMIT=255 -e XRD_REQUESTTIMEOUT=3600 </jobsub>
    <jobsub_start>--expected-lifetime=24h --append_condor_requirements='(TARGET.HAS_CVMFS_uboone_opensciencegrid_org==true)'</jobsub_start>
  </stage>

  <stage name="nueopenntuple">
    <!--<inputdef>&nueopen;</inputdef>-->
    <inputlist>/pnfs/uboone/persistent/users/davidc/jobs/v10_00_01/nueopen_showers.txt</inputlist>
    <fcl>run_neutrinoselectionfilter_run1_overlay_cc0pinp.fcl</fcl>
    <outdir>/pnfs/uboone/scratch/users/&user;/&release;/&name;/&date;/&nueopen;_ntuple/&version;</outdir>
    <logdir>/pnfs/uboone/scratch/users/&user;/&release;/&name;/&date;/&nueopen;_ntuple/&version;</logdir>
    <workdir>/pnfs/uboone/scratch/users/&user;/work/&release;/&name;/&date;/&nueopen;_ntuple/&version;</workdir>
    <bookdir>/exp/uboone/data/users/&user;/book/&release;/&name;/&date;/&nueopen;_ntuple/&version;</bookdir>
    <datatier>reconstructed</datatier>
    <defname>&name;_&tag;_reco</defname>
    <check>0</check>
    <memory>2000</memory>
    <disk>20GB</disk>
    <numjobs>1300</numjobs>
    <maxfilesperjob>1</maxfilesperjob>
    <schema>gsiftp</schema> <!-- Herb's work-around -->
    <jobsub>--expected-lifetime=24h --append_condor_requirements='(TARGET.HAS_CVMFS_uboone_opensciencegrid_org==true)' -e XRD_CONNECTIONRETRY=32 -e XRD_REDIRECTLIMIT=255 -e XRD_REQUESTTIMEOUT=3600 </jobsub>
    <jobsub_start>--expected-lifetime=24h --append_condor_requirements='(TARGET.HAS_CVMFS_uboone_opensciencegrid_org==true)'</jobsub_start>
  </stage>

<!-- file type -->
  <filetype>&file_type;</filetype>

  <!-- run type -->
  <runtype>&run_type;</runtype>

</project>

</job>
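
After submission, the same XML can be used with project.py to monitor and validate the jobs; a sketch of the usual follow-up commands, using the stage name from above:

project.py --xml grid_nugraph.xml --stage nueopenntuple --status
project.py --xml grid_nugraph.xml --stage nueopenntuple --check
project.py --xml grid_nugraph.xml --stage nueopenntuple --makeup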

To calculate the number of jobs to submit, we need to know how many files are in each sample:

samweb list-definition-files --summary prodgenie_bnb_nu_uboone_overlay_mcc9.1_v08_00_00_26_filter_run1_reco2_reco2
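
Since maxfilesperjob is 1, numjobs for a stage should roughly match the number of files in its input definition. The same summary query works for the other definitions in the XML, for example:

samweb list-definition-files --summary prodgenie_bnb_intrinsice_nue_uboone_overlay_mcc9.1_v08_00_00_26_run1_reco2_reco2
samweb list-definition-files --summary prodgenie_bnb_dirt_overlay_mcc9.1_v08_00_00_26_run1_reco2_reco2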