Skip to content

Simulation Tools

TOAST contains a variety of Operators and other tools for simulating telescope observations and different detector signals.

Simulated Observing

When designing new telescopes or observing strategies the TOAST scheduler can be used to create schedule files that can be passed to the SimGround and SimSatellite operators.

Ground-Based Schedules

Sky Patches

To-Do

We can't add docs for the patch types, because they have no docstrings...

Scheduling Utilities

To-Do

Do we want more of the low-level tools here?

toast.schedule_sim_ground.parse_patches(args, observer, sun, moon, start_timestamp, stop_timestamp)

Source code in toast/schedule_sim_ground.py
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
@function_timer
def parse_patches(args, observer, sun, moon, start_timestamp, stop_timestamp):
    """Parse the command line patch definitions into Patch objects.

    Each entry in ``args.patch`` is a comma-separated string whose first
    field is the patch name.  The second field either selects a special
    patch type (HORIZONTAL, WEIGHTED_HORIZONTAL, SIDEREAL, MAX-DEPTH, SSO,
    COOLER) or, when numeric, gives the priority weight of a regular sky
    patch whose geometry is parsed from the remaining fields.  When
    ``args.debug`` is set, a summary plot of all patches (and Sun/Moon
    avoidance regions) is written to ``patches.png``.

    Args:
        args:  Parsed command line arguments (argparse namespace).
        observer:  PyEphem-style observer for the telescope site.
        sun:  Ephemeris object for the Sun.
        moon:  Ephemeris object for the Moon.
        start_timestamp (float):  Schedule start time (UNIX seconds).
        stop_timestamp (float):  Schedule stop time (UNIX seconds).

    Returns:
        (list):  The list of Patch objects, with weights normalized so
            they sum to one.

    Raises:
        RuntimeError:  If a patch priority is NaN or zero.

    """
    log = Logger.get()
    patches = []
    total_weight = 0
    for patch_def in args.patch:
        parts = patch_def.split(",")
        name = parts[0]
        log.info(f'Adding patch "{name}"')
        # Dispatch on the patch type keyword; a numeric second field means
        # this is a regular sky patch with an explicit priority weight.
        if parts[1].upper() == "HORIZONTAL":
            patch = parse_patch_horizontal(args, parts)
        elif parts[1].upper() == "WEIGHTED_HORIZONTAL":
            patch = parse_patch_weighted_horizontal(args, parts)
        elif parts[1].upper() == "SIDEREAL":
            patch = parse_patch_sidereal(args, parts)
        elif parts[1].upper() == "MAX-DEPTH":
            patch = parse_patch_max_depth(args, parts, observer)
        elif parts[1].upper() == "SSO":
            patch = parse_patch_sso(args, parts)
        elif parts[1].upper() == "COOLER":
            patch = parse_patch_cooler(args, parts, start_timestamp)
        else:
            weight = float(parts[1])
            if np.isnan(weight):
                raise RuntimeError("Patch has NaN priority: {}".format(patch_def))
            if weight == 0:
                raise RuntimeError("Patch has zero priority: {}".format(patch_def))
            # Remaining fields define the geometry:
            # 3 fields = center and width, 4 fields = rectangle,
            # anything else = explicit list of corners.
            if len(parts[2:]) == 3:
                corners = parse_patch_center_and_width(args, parts)
                area = None
            elif len(parts[2:]) == 4:
                corners, area = parse_patch_rectangular(args, parts)
            else:
                corners = parse_patch_explicit(args, parts)
                area = None
            patch = Patch(
                name,
                weight,
                corners,
                el_min=args.el_min_deg * degree,
                el_max=args.el_max_deg * degree,
                el_step=args.el_step_deg * degree,
                alternate=args.alternate,
                site_lat=observer.lat,
                area=area,
                ra_period=args.ra_period,
                ra_amplitude=args.ra_amplitude_deg,
                dec_period=args.dec_period,
                dec_amplitude=args.dec_amplitude_deg,
                elevations=args.elevations_deg,
            )
        if args.equalize_area or args.debug:
            # Called for its side effects: computes (and optionally
            # equalizes) the patch area cached on the Patch instance.
            patch.get_area(observer, nside=32, equalize=args.equalize_area)
        total_weight += patch.weight
        patches.append(patch)

        if patches[-1].el_max0 is not None:
            el_max = np.degrees(patches[-1].el_max0)
            log.debug(f"Highest possible observing elevation: {el_max:.2f} deg.")
        if patches[-1]._area is not None:
            log.debug(f"Sky fraction = {patch._area:.4f}")

    if args.debug:
        import matplotlib.pyplot as plt

        # Optional polarization amplitude map used as a plot background.
        polmap = None
        if args.polmap:
            polmap = hp.read_map(args.polmap, [1, 2])
            bad = polmap[0] == hp.UNSEEN
            polmap = np.sqrt(polmap[0] ** 2 + polmap[1] ** 2) * 1e6
            polmap[bad] = hp.UNSEEN
        plt.style.use("default")
        cmap = cm.inferno
        cmap.set_under("w")
        plt.figure(figsize=[20, 4])
        plt.subplots_adjust(left=0.1, right=0.9)
        patch_color = "black"
        sun_color = "black"
        sun_lw = 8
        sun_avoidance_color = "gray"
        moon_color = "black"
        moon_lw = 2
        moon_avoidance_color = "gray"
        alpha = 0.5
        avoidance_alpha = 0.01
        # Time steps (in seconds) for sampling the Sun and Moon positions.
        sun_step = int(86400 * 0.25)
        moon_step = int(86400 * 0.1)
        # One Mollweide panel per coordinate system.
        for iplot, coord in enumerate("CEG"):
            scoord = {"C": "Equatorial", "E": "Ecliptic", "G": "Galactic"}[coord]
            title = scoord  # + ' patch locations'
            if polmap is None:
                nside = 256
                avoidance_map = np.zeros(12 * nside**2)
                # hp.mollview(np.zeros(12) + hp.UNSEEN, coord=coord, cbar=False,
                #            title='', sub=[1, 3, 1 + iplot], cmap=cmap)
            else:
                hp.mollview(
                    polmap,
                    coord="G" + coord,
                    cbar=True,
                    unit=r"$\mu$K",
                    min=args.pol_min,
                    max=args.pol_max,
                    norm="log",
                    cmap=cmap,
                    title=title,
                    sub=[1, 3, 1 + iplot],
                    notext=True,
                    format="%.1f",
                    xsize=1600,
                )
            # Plot sun and moon avoidance circle
            sunlon, sunlat = [], []
            moonlon, moonlat = [], []
            for lon, lat, sso, angle_min, alt_min, color, step, lw in [
                (
                    sunlon,
                    sunlat,
                    sun,
                    np.radians(args.sun_avoidance_angle_deg),
                    np.radians(args.sun_avoidance_altitude_deg),
                    sun_avoidance_color,
                    sun_step,
                    sun_lw,
                ),
                (
                    moonlon,
                    moonlat,
                    moon,
                    np.radians(args.moon_avoidance_angle_deg),
                    np.radians(args.moon_avoidance_altitude_deg),
                    moon_avoidance_color,
                    moon_step,
                    moon_lw,
                ),
            ]:
                for t in range(int(start_timestamp), int(stop_timestamp), step):
                    observer.date = to_DJD(t)
                    sso.compute(observer)
                    lon.append(np.degrees(sso.a_ra))
                    lat.append(np.degrees(sso.a_dec))
                    # Only draw the avoidance region while avoidance is
                    # enabled and the object is above the altitude limit.
                    if angle_min <= 0 or sso.alt < alt_min:
                        continue
                    if polmap is None:
                        # accumulate avoidance map
                        vec = hp.dir2vec(lon[-1], lat[-1], lonlat=True)
                        pix = hp.query_disc(nside, vec, angle_min)
                        for p in pix:
                            avoidance_map[p] += 1
                    else:
                        # plot a circle around the location
                        clon, clat = [], []
                        phi = sso.a_ra
                        theta = sso.a_dec
                        r = angle_min
                        for ang in np.linspace(0, 2 * np.pi, 36):
                            dtheta = np.cos(ang) * r
                            dphi = np.sin(ang) * r / np.cos(theta + dtheta)
                            clon.append(np.degrees(phi + dphi))
                            clat.append(np.degrees(theta + dtheta))
                        hp.projplot(
                            clon,
                            clat,
                            "-",
                            color=color,
                            alpha=avoidance_alpha,
                            lw=lw,
                            threshold=1,
                            lonlat=True,
                            coord="C",
                        )
            if polmap is None:
                avoidance_map[avoidance_map == 0] = hp.UNSEEN
                hp.mollview(
                    avoidance_map,
                    coord="C" + coord,
                    cbar=False,
                    title="",
                    sub=[1, 3, 1 + iplot],
                    cmap=cmap,
                )
            hp.graticule(30, verbose=False)

            # Plot patches
            for patch in patches:
                lon = [np.degrees(corner._ra) for corner in patch.corners]
                lat = [np.degrees(corner._dec) for corner in patch.corners]
                if len(lon) == 0:
                    # Special patch without sky coordinates
                    continue
                # Close the polygon
                lon.append(lon[0])
                lat.append(lat[0])
                log.info(f"{patch.name} corners:\n lon = {lon}\n lat = {lat}")
                hp.projplot(
                    lon,
                    lat,
                    "-",
                    threshold=1,
                    lonlat=True,
                    coord="C",
                    color=patch_color,
                    lw=2,
                    alpha=alpha,
                )
                if len(patches) > 10:
                    continue
                # label the patch
                it = np.argmax(lat)
                area = patch.get_area(observer)
                title = "{} {:.2f}%".format(patch.name, 100 * area)
                hp.projtext(
                    lon[it],
                    lat[it],
                    title,
                    lonlat=True,
                    coord="C",
                    color=patch_color,
                    fontsize=14,
                    alpha=alpha,
                )
            if polmap is not None:
                # Plot Sun and Moon trajectory
                hp.projplot(
                    sunlon,
                    sunlat,
                    "-",
                    color=sun_color,
                    alpha=alpha,
                    threshold=1,
                    lonlat=True,
                    coord="C",
                    lw=sun_lw,
                )
                hp.projplot(
                    moonlon,
                    moonlat,
                    "-",
                    color=moon_color,
                    alpha=alpha,
                    threshold=1,
                    lonlat=True,
                    coord="C",
                    lw=moon_lw,
                )
                hp.projtext(
                    sunlon[0],
                    sunlat[0],
                    "Sun",
                    color=sun_color,
                    lonlat=True,
                    coord="C",
                    fontsize=14,
                    alpha=alpha,
                )
                hp.projtext(
                    moonlon[0],
                    moonlat[0],
                    "Moon",
                    color=moon_color,
                    lonlat=True,
                    coord="C",
                    fontsize=14,
                    alpha=alpha,
                )

        plt.savefig("patches.png")
        plt.close()

    # Normalize the weights
    for i in range(len(patches)):
        patches[i].weight /= total_weight
    return patches

toast.schedule_sim_ground.build_schedule(args, start_timestamp, stop_timestamp, patches, observer, sun, moon)

Source code in toast/schedule_sim_ground.py
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
@function_timer
def build_schedule(args, start_timestamp, stop_timestamp, patches, observer, sun, moon):
    """Write the observing schedule for the given patches.

    Steps through the observing period, determining which patches are
    visible at each time, prioritizing them, and attempting rising and
    setting scans until one succeeds.  Each successful scan is written as
    one line of the schedule file at ``args.out``.

    Args:
        args:  Parsed command line arguments (argparse namespace).
        start_timestamp (float):  Schedule start time (UNIX seconds).
        stop_timestamp (float):  Schedule stop time (UNIX seconds).
        patches (list):  Patch objects from parse_patches().
        observer:  PyEphem-style observer for the telescope site.
        sun:  Ephemeris object for the Sun.
        moon:  Ephemeris object for the Moon.

    Returns:
        None

    """
    log = Logger.get()

    sun_el_max = args.sun_el_max_deg * degree
    if args.elevations_deg is None:
        el_min = args.el_min_deg
        el_max = args.el_max_deg
    else:
        # Override the elevation limits to bracket the explicit list of
        # observing elevations with a 10% margin on either side.
        el_min = 90
        el_max = 0
        for el in args.elevations_deg.split(","):
            el = float(el)
            el_min = min(el * 0.9, el_min)
            el_max = max(el * 1.1, el_max)
    el_min *= degree
    el_max *= degree
    fp_radius = args.fp_radius_deg * degree

    fname_out = args.out
    dir_out = os.path.dirname(fname_out)
    if dir_out:
        log.info(f"Creating '{dir_out}'")
        os.makedirs(dir_out, exist_ok=True)
    fout = open(fname_out, "w")

    # Write the site header
    header_fmt = "#{:14} {:15} {:>15} {:>15} {:>15}\n"
    header_data_fmt = "{:15} {:15} {:15.3f} {:15.3f} {:15.1f}\n"
    if args.field_separator != "":
        header_fmt = header_fmt.replace(" ", args.field_separator)
        header_data_fmt = header_data_fmt.replace(" ", args.field_separator)
    fout.write(
        header_fmt.format(
            "Site", "Telescope", "Latitude [deg]", "Longitude [deg]", "Elevation [m]"
        )
    )
    fout.write(
        header_data_fmt.format(
            args.site_name,
            args.telescope,
            np.degrees(observer.lat),
            np.degrees(observer.lon),
            observer.elevation,
        )
    )

    if args.verbose_schedule:
        fout_fmt0 = (
            "#{:>19} {:>20} {:>14} {:>14} {:>8} "
            "{:35} {:>8} {:>8} {:>8} {:>5} "
            "{:>8} {:>8} {:>8} {:>8} "
            "{:>8} {:>8} {:>8} {:>8} {:>5} "
            "{:>5} {:>3} {:>8}\n"
        )
        fout_fmt = (
            "{:20} {:20} {:14.6f} {:14.6f} {:8.2f} "
            "{:35} {:8.2f} {:8.2f} {:8.2f} {:5} "
            "{:8.2f} {:8.2f} {:8.2f} {:8.2f} "
            "{:8.2f} {:8.2f} {:8.2f} {:8.2f} {:5.2f} "
            "{:5} {:3} {:8.3f}\n"
        )
        if args.field_separator != "":
            fout_fmt0 = fout_fmt0.replace(" ", args.field_separator)
            fout_fmt = fout_fmt.replace(" ", args.field_separator)
        fout.write(
            fout_fmt0.format(
                "Start time UTC",
                "Stop time UTC",
                "Start MJD",
                "Stop MJD",
                "Rotation",
                "Patch name",
                "Az min",
                "Az max",
                "El",
                "R/S",
                "Sun el1",
                "Sun az1",
                "Sun el2",
                "Sun az2",
                "Moon el1",
                "Moon az1",
                "Moon el2",
                "Moon az2",
                "Phase",
                "Pass",
                "Sub",
                "CTime",
            )
        )
    else:
        # Concise schedule format
        fout_fmt0 = "#{:>19} {:>20} {:>8} {:35} {:>8} {:>8} {:>8} {:>5} {:>3}\n"
        fout_fmt = "{:>20} {:>20} {:8.2f} {:35} {:8.2f} {:8.2f} {:8.2f} {:5} {:3}\n"
        if args.field_separator != "":
            fout_fmt0 = fout_fmt0.replace(" ", args.field_separator)
            fout_fmt = fout_fmt.replace(" ", args.field_separator)
        fout.write(
            fout_fmt0.format(
                "Start time UTC",
                "Stop time UTC",
                "Rotation",
                "Patch name",
                "Az min",
                "Az max",
                "El",
                "Pass",
                "Sub",
            )
        )

    # Operational days
    ods = set()

    t = start_timestamp
    last_successful = t
    last_el = None
    while True:
        t, blocked = apply_blockouts(args, t)
        boresight_angle = get_boresight_angle(args, t)
        if t > stop_timestamp:
            break
        if t - last_successful > args.elevation_change_time_s:
            # It no longer matters what the last used elevation was
            last_el = None
        if t - last_successful > 86400 or blocked:
            # A long time has passed since the last successfully
            # scheduled scan.
            # Reset the individual patch az and el limits
            for patch in patches:
                patch.reset()
            if blocked:
                last_successful = t
            else:
                # Only try this once for every day.  Swapping
                # `t` <-> `last_successful` means that we will not trigger
                # this branch again without scheduling a successful scan
                log.debug(
                    f"Resetting patches and returning to the last successful "
                    f"scan: {to_UTC(last_successful)}"
                )
                t, last_successful = last_successful, t

        # Determine which patches are observable at time t.

        log.debug(f"t = {to_UTC(t)}")
        # Determine which patches are visible
        observer.date = to_DJD(t)
        sun.compute(observer)
        if sun.alt > sun_el_max:
            log.debug(
                "Sun elevation is {:.2f} > {:.2f}. Moving on.".format(
                    np.degrees(sun.alt), np.degrees(sun_el_max)
                )
            )
            t = advance_time(t, args.time_step_s)
            continue
        moon.compute(observer)

        visible, not_visible = get_visible(args, observer, patches, el_min)

        if len(visible) == 0:
            tutc = to_UTC(t)
            log.debug(f"No patches visible at {tutc}: {not_visible}")
            t = advance_time(t, args.time_step_s)
            continue

        # Determine if a cooler cycle sets a limit for observing
        tstop_cooler = stop_timestamp
        for patch in patches:
            if isinstance(patch, CoolerCyclePatch):
                ttest = patch.last_cycle_end + patch.hold_time_max
                if ttest < tstop_cooler:
                    tstop_cooler = ttest

        # Order the targets by priority and attempt to observe with both
        # a rising and setting scans until we find one that can be
        # successfully scanned.
        # If the criteria are not met, advance the time by a step
        # and try again

        prioritize(args, observer, visible, last_el)

        if args.pole_mode:
            success, t, el = attempt_scan_pole(
                args,
                observer,
                visible,
                not_visible,
                t,
                fp_radius,
                el_max,
                el_min,
                stop_timestamp,
                tstop_cooler,
                sun,
                moon,
                sun_el_max,
                fout,
                fout_fmt,
                ods,
                boresight_angle,
                # Pole scheduling does not (yet) implement
                # elevation change penalty
                # last_successful,
                # last_el,
            )
        else:
            success, t, el = attempt_scan(
                args,
                observer,
                visible,
                not_visible,
                t,
                fp_radius,
                stop_timestamp,
                tstop_cooler,
                sun,
                moon,
                sun_el_max,
                fout,
                fout_fmt,
                ods,
                boresight_angle,
                last_successful,
                last_el,
            )

        if args.operational_days and len(ods) > args.operational_days:
            break

        if not success:
            log.debug(f"No patches could be scanned at {to_UTC(t)}: {not_visible}")
            t = advance_time(t, args.time_step_s)
        else:
            last_successful = t
            last_el = el

    fout.close()
    return

Generating the Schedule

toast.schedule_sim_ground.run_scheduler(opts=None)

Source code in toast/schedule_sim_ground.py
3704
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
def run_scheduler(opts=None):
    """Top-level entry point for generating a ground schedule.

    Parses the options, constructs the site observer and the solar-system
    ephemeris objects, builds the patch list and writes the schedule file.

    Args:
        opts (list):  Optional list of command line options to parse
            instead of the process arguments — presumably forwarded to
            parse_args; TODO confirm.

    Returns:
        None

    """
    args, start_timestamp, stop_timestamp = parse_args(opts=opts)

    # PyEphem observer positioned at the telescope site.
    site = ephem.Observer()
    site.lon = args.site_lon
    site.lat = args.site_lat
    site.elevation = args.site_alt  # In meters
    site.epoch = "2000"
    site.temp = 0  # in Celsius
    site.compute_pressure()

    # Ephemeris bodies used for visibility and avoidance checks.
    sun = ephem.Sun()
    moon = ephem.Moon()

    # Turn the command line patch definitions into Patch objects, then
    # schedule observations of them over the requested time range.
    patches = parse_patches(args, site, sun, moon, start_timestamp, stop_timestamp)
    build_schedule(args, start_timestamp, stop_timestamp, patches, site, sun, moon)
    return

Space-Based Schedules

Generating schedules for a satellite is conceptually simpler due to the constraints on spacecraft dynamics.

toast.schedule_sim_satellite.create_satellite_schedule(prefix='', mission_start=None, observation_time=10 * u.minute, gap_time=0 * u.minute, num_observations=1, prec_period=10 * u.minute, spin_period=2 * u.minute, site_name='space', telescope_name='satellite')

Generate a satellite observing schedule.

This creates a series of scans with identical lengths and rotation rates, as well as optional gaps between.

Parameters:

Name Type Description Default
prefix str

The prefix for the name of each scan.

''
mission_start datetime

The overall start time of the schedule.

None
observation_time Quantity

The length of each observation.

10 * minute
gap_time Quantity

The time between observations.

0 * minute
num_observations int

The number of observations.

1
prec_period Quantity

The time for one revolution about the precession axis.

10 * minute
spin_period Quantity

The time for one revolution about the spin axis.

2 * minute
site_name str

The name of the site to include in the schedule.

'space'
telescope_name str

The name of the telescope to include in the schedule.

'satellite'

Returns:

Type Description
SatelliteSchedule

The resulting schedule.

Source code in toast/schedule_sim_satellite.py
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
def create_satellite_schedule(
    prefix="",
    mission_start=None,
    observation_time=10 * u.minute,
    gap_time=0 * u.minute,
    num_observations=1,
    prec_period=10 * u.minute,
    spin_period=2 * u.minute,
    site_name="space",
    telescope_name="satellite",
):
    """Build a satellite observing schedule.

    Every scan in the resulting schedule has the same duration and the
    same precession / spin periods; consecutive scans may be separated by
    an optional fixed gap.

    Args:
        prefix (str):  Prefix prepended to each scan name.
        mission_start (datetime):  Start time of the whole schedule.
            Required; assumed to be UTC if it is timezone-naive.
        observation_time (Quantity):  Duration of a single observation.
        gap_time (Quantity):  Idle time between consecutive observations.
        num_observations (int):  How many observations to generate.
        prec_period (Quantity):  Time for one revolution about the
            precession axis.
        spin_period (Quantity):  Time for one revolution about the spin
            axis.
        site_name (str):  Site name recorded in the schedule.
        telescope_name (str):  Telescope name recorded in the schedule.

    Returns:
        (SatelliteSchedule):  The resulting schedule.

    Raises:
        RuntimeError:  If ``mission_start`` is not provided.

    """
    log = Logger.get()
    if mission_start is None:
        raise RuntimeError("You must specify the mission start")

    if mission_start.tzinfo is None:
        msg = f"Mission start time '{mission_start}' is not timezone-aware.  Assuming UTC."
        log.warning(msg)
        mission_start = mission_start.replace(tzinfo=datetime.timezone.utc)

    scan_span = datetime.timedelta(seconds=observation_time.to_value(u.second))
    idle_span = datetime.timedelta(seconds=gap_time.to_value(u.second))
    if gap_time.to_value(u.second) == 0:
        # With back-to-back scans, shave a sliver (far below one sample for
        # any reasonable experiment) off each stop time so that no scan
        # ever starts at exactly the instant the previous one ends.
        shave = datetime.timedelta(microseconds=2)
    else:
        shave = datetime.timedelta(seconds=0)

    stride = scan_span + idle_span

    scans = list()
    for index in range(num_observations):
        begin = index * stride + mission_start
        end = begin + scan_span - shave
        label = "{}{:06d}_{}".format(prefix, index, begin.isoformat(timespec="minutes"))
        scans.append(
            SatelliteScan(
                name=label,
                start=begin,
                stop=end,
                prec_period=prec_period,
                spin_period=spin_period,
            )
        )
    return SatelliteSchedule(
        scans=scans, site_name=site_name, telescope_name=telescope_name
    )

Creating Observations

toast.ops.SimGround

Bases: Operator

Simulate a generic ground-based telescope scanning.

This simulates ground-based pointing in constant elevation scans for a telescope located at a particular site and using a pre-created schedule.

The created observations define several interval lists to describe regions where the telescope is scanning left, right or in a turnaround or El-nod. A shared flag array is also created with bits sets for these same properties.

Source code in toast/ops/sim_ground.py
  57
  58
  59
  60
  61
  62
  63
  64
  65
  66
  67
  68
  69
  70
  71
  72
  73
  74
  75
  76
  77
  78
  79
  80
  81
  82
  83
  84
  85
  86
  87
  88
  89
  90
  91
  92
  93
  94
  95
  96
  97
  98
  99
 100
 101
 102
 103
 104
 105
 106
 107
 108
 109
 110
 111
 112
 113
 114
 115
 116
 117
 118
 119
 120
 121
 122
 123
 124
 125
 126
 127
 128
 129
 130
 131
 132
 133
 134
 135
 136
 137
 138
 139
 140
 141
 142
 143
 144
 145
 146
 147
 148
 149
 150
 151
 152
 153
 154
 155
 156
 157
 158
 159
 160
 161
 162
 163
 164
 165
 166
 167
 168
 169
 170
 171
 172
 173
 174
 175
 176
 177
 178
 179
 180
 181
 182
 183
 184
 185
 186
 187
 188
 189
 190
 191
 192
 193
 194
 195
 196
 197
 198
 199
 200
 201
 202
 203
 204
 205
 206
 207
 208
 209
 210
 211
 212
 213
 214
 215
 216
 217
 218
 219
 220
 221
 222
 223
 224
 225
 226
 227
 228
 229
 230
 231
 232
 233
 234
 235
 236
 237
 238
 239
 240
 241
 242
 243
 244
 245
 246
 247
 248
 249
 250
 251
 252
 253
 254
 255
 256
 257
 258
 259
 260
 261
 262
 263
 264
 265
 266
 267
 268
 269
 270
 271
 272
 273
 274
 275
 276
 277
 278
 279
 280
 281
 282
 283
 284
 285
 286
 287
 288
 289
 290
 291
 292
 293
 294
 295
 296
 297
 298
 299
 300
 301
 302
 303
 304
 305
 306
 307
 308
 309
 310
 311
 312
 313
 314
 315
 316
 317
 318
 319
 320
 321
 322
 323
 324
 325
 326
 327
 328
 329
 330
 331
 332
 333
 334
 335
 336
 337
 338
 339
 340
 341
 342
 343
 344
 345
 346
 347
 348
 349
 350
 351
 352
 353
 354
 355
 356
 357
 358
 359
 360
 361
 362
 363
 364
 365
 366
 367
 368
 369
 370
 371
 372
 373
 374
 375
 376
 377
 378
 379
 380
 381
 382
 383
 384
 385
 386
 387
 388
 389
 390
 391
 392
 393
 394
 395
 396
 397
 398
 399
 400
 401
 402
 403
 404
 405
 406
 407
 408
 409
 410
 411
 412
 413
 414
 415
 416
 417
 418
 419
 420
 421
 422
 423
 424
 425
 426
 427
 428
 429
 430
 431
 432
 433
 434
 435
 436
 437
 438
 439
 440
 441
 442
 443
 444
 445
 446
 447
 448
 449
 450
 451
 452
 453
 454
 455
 456
 457
 458
 459
 460
 461
 462
 463
 464
 465
 466
 467
 468
 469
 470
 471
 472
 473
 474
 475
 476
 477
 478
 479
 480
 481
 482
 483
 484
 485
 486
 487
 488
 489
 490
 491
 492
 493
 494
 495
 496
 497
 498
 499
 500
 501
 502
 503
 504
 505
 506
 507
 508
 509
 510
 511
 512
 513
 514
 515
 516
 517
 518
 519
 520
 521
 522
 523
 524
 525
 526
 527
 528
 529
 530
 531
 532
 533
 534
 535
 536
 537
 538
 539
 540
 541
 542
 543
 544
 545
 546
 547
 548
 549
 550
 551
 552
 553
 554
 555
 556
 557
 558
 559
 560
 561
 562
 563
 564
 565
 566
 567
 568
 569
 570
 571
 572
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
@trait_docs
class SimGround(Operator):
    """Simulate a generic ground-based telescope scanning.

    This simulates ground-based pointing in constant elevation scans for a telescope
    located at a particular site and using an pre-created schedule.

    The created observations define several interval lists to describe regions where
    the telescope is scanning left, right or in a turnaround or El-nod.  A shared
    flag array is also created with bits sets for these same properties.

    """

    # Class traits.  Each trait is a configurable parameter of this operator;
    # the help strings are surfaced in generated documentation and CLI tools.

    # Internal interface version, bumped when the operator API changes.
    API = Int(0, help="Internal interface version for this operator")

    # --- Instrument, site, and schedule configuration ---

    telescope = Instance(
        klass=Telescope, allow_none=True, help="This must be an instance of a Telescope"
    )

    session_split_key = Unicode(
        None, allow_none=True, help="Focalplane key for splitting into observations"
    )

    weather = Unicode(
        None,
        allow_none=True,
        help="Name of built-in weather site (e.g. 'atacama', 'south_pole') or path to HDF5 file",
    )

    realization = Int(0, help="The realization index")

    schedule = Instance(
        klass=GroundSchedule, allow_none=True, help="Instance of a GroundSchedule"
    )

    randomize_phase = Bool(
        False,
        help="If True, the Constant Elevation Scan will begin at a randomized phase.",
    )

    track_azimuth = Bool(
        False,
        help="If True, the azimuth throw is continually adjusted to center the field.",
    )

    # Coordinate transform backends.  These are mutually exclusive; the
    # validators below reject enabling both at once.
    use_ephem = Bool(
        True,
        help="Use PyEphem to convert between horizontal and equatorial systems",
    )

    use_qpoint = Bool(
        False,
        help="Use qpoint to convert between horizontal and equatorial systems",
    )

    # --- Scan motion parameters ---

    scan_rate_az = Quantity(
        1.0 * u.degree / u.second,
        help="The sky or mount azimuth scanning rate.  See `fix_rate_on_sky`",
    )

    fix_rate_on_sky = Bool(
        True,
        help="If True, `scan_rate_az` is given in sky coordinates and azimuthal rate "
        "on mount will be adjusted to meet it.  If False, `scan_rate_az` is used as "
        "the mount azimuthal rate.",
    )

    scan_rate_el = Quantity(
        1.0 * u.degree / u.second,
        allow_none=True,
        help="The sky elevation scanning rate",
    )

    scan_accel_az = Quantity(
        1.0 * u.degree / u.second**2,
        help="Mount scanning rate acceleration for turnarounds",
    )

    scan_accel_el = Quantity(
        1.0 * u.degree / u.second**2,
        allow_none=True,
        help="Mount elevation rate acceleration.",
    )

    scan_cosecant_modulation = Bool(
        False, help="Modulate the scan rate according to 1/sin(az) for uniform depth"
    )

    sun_angle_min = Quantity(
        90.0 * u.degree, help="Minimum angular distance for the scan and the Sun"
    )

    # --- Elevation modulation options ---

    el_mod_step = Quantity(
        0.0 * u.degree, help="Amount to step elevation after each left-right scan pair"
    )

    el_mod_rate = Quantity(
        0.0 * u.Hz, help="Modulate elevation continuously at this rate"
    )

    el_mod_amplitude = Quantity(1.0 * u.degree, help="Range of elevation modulation")

    el_mod_sine = Bool(
        False, help="Modulate elevation with a sine wave instead of a triangle wave"
    )

    # --- Data distribution ---

    distribute_time = Bool(
        False,
        help="Distribute observation data along the time axis rather than detector axis",
    )

    detset_key = Unicode(
        None,
        allow_none=True,
        help="If specified, use this column of the focalplane detector_data to group detectors",
    )

    # --- Observation shared / detdata field names ---

    times = Unicode(defaults.times, help="Observation shared key for timestamps")

    shared_flags = Unicode(
        defaults.shared_flags,
        allow_none=True,
        help="Observation shared key for common flags",
    )

    det_data = Unicode(
        defaults.det_data,
        allow_none=True,
        help="Observation detdata key to initialize",
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    det_flags = Unicode(
        defaults.det_flags,
        allow_none=True,
        help="Observation detdata key for flags to initialize",
    )

    hwp_angle = Unicode(
        None, allow_none=True, help="Observation shared key for HWP angle"
    )

    azimuth = Unicode(defaults.azimuth, help="Observation shared key for Azimuth")

    elevation = Unicode(defaults.elevation, help="Observation shared key for Elevation")

    boresight_azel = Unicode(
        defaults.boresight_azel, help="Observation shared key for boresight AZ/EL"
    )

    boresight_radec = Unicode(
        defaults.boresight_radec, help="Observation shared key for boresight RA/DEC"
    )

    position = Unicode(defaults.position, help="Observation shared key for position")

    velocity = Unicode(defaults.velocity, help="Observation shared key for velocity")

    # --- HWP configuration (cross-checked by the validators below:
    # a shared key requires motion parameters, and rotation / stepping
    # are mutually exclusive) ---

    hwp_rpm = Float(None, allow_none=True, help="The rate (in RPM) of the HWP rotation")

    hwp_step = Quantity(
        None, allow_none=True, help="For stepped HWP, the angle of each step"
    )

    hwp_step_time = Quantity(
        None, allow_none=True, help="For stepped HWP, the time between steps"
    )

    # --- El-nod configuration ---

    elnod_start = Bool(False, help="Perform an el-nod before the scan")

    elnod_end = Bool(False, help="Perform an el-nod after the scan")

    elnods = List([], help="List of relative el_nods")

    elnod_every_scan = Bool(False, help="Perform el nods every scan")

    # --- Interval list names created in each observation ---

    scanning_interval = Unicode(
        defaults.scanning_interval, help="Interval name for scanning"
    )

    turnaround_interval = Unicode(
        defaults.turnaround_interval, help="Interval name for turnarounds"
    )

    throw_leftright_interval = Unicode(
        defaults.throw_leftright_interval,
        help="Interval name for left to right scans + turnarounds",
    )

    throw_rightleft_interval = Unicode(
        defaults.throw_rightleft_interval,
        help="Interval name for right to left scans + turnarounds",
    )

    throw_interval = Unicode(
        defaults.throw_interval, help="Interval name for scan + turnaround intervals"
    )

    scan_leftright_interval = Unicode(
        defaults.scan_leftright_interval, help="Interval name for left to right scans"
    )

    turn_leftright_interval = Unicode(
        defaults.turn_leftright_interval,
        help="Interval name for turnarounds after left to right scans",
    )

    scan_rightleft_interval = Unicode(
        defaults.scan_rightleft_interval, help="Interval name for right to left scans"
    )

    turn_rightleft_interval = Unicode(
        defaults.turn_rightleft_interval,
        help="Interval name for turnarounds after right to left scans",
    )

    elnod_interval = Unicode(defaults.elnod_interval, help="Interval name for elnods")

    sun_up_interval = Unicode(
        defaults.sun_up_interval, help="Interval name for times when the sun is up"
    )

    sun_close_interval = Unicode(
        defaults.sun_close_interval,
        help="Interval name for times when the sun is close",
    )

    sun_close_distance = Quantity(45.0 * u.degree, help="'Sun close' flagging distance")

    # --- Weather realization limits ---

    max_pwv = Quantity(
        None, allow_none=True, help="Maximum PWV for the simulated weather."
    )

    median_weather = Bool(
        False,
        help="Use median weather parameters instead of sampling from the distributions",
    )

    # --- Shared flag bit masks raised over the matching intervals ---

    turnaround_mask = Int(
        defaults.shared_mask_unstable_scanrate,
        help="Bit mask to raise turnaround flags with",
    )

    sun_up_mask = Int(
        defaults.shared_mask_sun_up, help="Bit mask to raise Sun up flags with"
    )

    sun_close_mask = Int(
        defaults.shared_mask_sun_close, help="Bit mask to raise Sun close flags with"
    )

    elnod_mask = Int(
        defaults.shared_mask_irregular,
        help="Bit mask to raise elevation nod flags with",
    )

    @traitlets.validate("telescope")
    def _check_telescope(self, proposal):
        """Validate the ``telescope`` trait.

        The value must be None or a Telescope-like object whose focalplane
        exposes a ``detectors`` attribute.

        Raises:
            traitlets.TraitError:  If accessing the focalplane detectors fails.

        """
        tele = proposal["value"]
        if tele is not None:
            try:
                _ = tele.focalplane.detectors
            except Exception as err:
                # Chain the original exception so the root cause of the
                # failed attribute access is not silently discarded.
                raise traitlets.TraitError(
                    "telescope must be a Telescope instance with a focalplane"
                ) from err
        return tele

    @traitlets.validate("use_ephem")
    def _check_use_ephem(self, proposal):
        """Reject enabling ephem while qpoint is already selected."""
        value = proposal["value"]
        if value and self.use_qpoint:
            raise traitlets.TraitError("Cannot use both ephem and qpoint")
        return value

    @traitlets.validate("use_qpoint")
    def _check_use_qpoint(self, proposal):
        """Validate the ``use_qpoint`` trait.

        Enabling qpoint is rejected if ephem is already enabled, and requires
        the qpoint package to be importable.

        Raises:
            traitlets.TraitError:  If ephem is also enabled.
            RuntimeError:  If the qpoint package cannot be imported.

        """
        use_qpoint = proposal["value"]
        if use_qpoint:
            if self.use_ephem:
                raise traitlets.TraitError("Cannot use both ephem and qpoint")
            try:
                # Availability check only; the module is not used here.
                import qpoint  # noqa: F401
            except ModuleNotFoundError as e:
                # Chain the import failure so the original traceback survives.
                raise RuntimeError(f"Cannot use qpoint: '{e}'") from e
        return use_qpoint

    @traitlets.validate("schedule")
    def _check_schedule(self, proposal):
        """Require the schedule to be a GroundSchedule (or None)."""
        value = proposal["value"]
        if value is None or isinstance(value, GroundSchedule):
            return value
        raise traitlets.TraitError(
            "schedule must be an instance of a GroundSchedule"
        )

    # Cross-check HWP parameters

    @traitlets.validate("hwp_angle")
    def _check_hwp_angle(self, proposal):
        """Keep the HWP shared key consistent with the HWP motion traits."""
        value = proposal["value"]
        # True when either rotation or stepping of the HWP is configured.
        have_motion = self.hwp_rpm is not None or self.hwp_step is not None
        if value is None and have_motion:
            raise traitlets.TraitError(
                "Cannot simulate HWP without a shared data key"
            )
        if value is not None and not have_motion:
            raise traitlets.TraitError("Cannot simulate HWP without parameters")
        return value

    @traitlets.validate("hwp_rpm")
    def _check_hwp_rpm(self, proposal):
        """Cross-check the continuously-rotating HWP rate with the other HWP traits."""
        value = proposal["value"]
        if value is None:
            # Disabling rotation: a shared angle key with no step schedule
            # would leave the HWP without any motion parameters.
            if self.hwp_angle is not None and self.hwp_step is None:
                raise traitlets.TraitError("Cannot simulate HWP without parameters")
            return value
        if self.hwp_angle is None:
            raise traitlets.TraitError(
                "Cannot simulate rotating HWP without a shared data key"
            )
        if self.hwp_step is not None:
            raise traitlets.TraitError("HWP cannot rotate *and* step.")
        return value

    @traitlets.validate("hwp_step")
    def _check_hwp_step(self, proposal):
        """Cross-check the stepped-HWP angle with the other HWP traits."""
        value = proposal["value"]
        if value is None:
            # Disabling stepping: a shared angle key with no rotation rate
            # would leave the HWP without any motion parameters.
            if self.hwp_angle is not None and self.hwp_rpm is None:
                raise traitlets.TraitError("Cannot simulate HWP without parameters")
            return value
        if self.hwp_angle is None:
            raise traitlets.TraitError(
                "Cannot simulate stepped HWP without a shared data key"
            )
        if self.hwp_rpm is not None:
            raise traitlets.TraitError("HWP cannot rotate *and* step.")
        return value

    def __init__(self, **kwargs):
        # All configuration is handled through the class traits above; this
        # simply forwards keyword arguments to the Operator base class.
        super().__init__(**kwargs)

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Create simulated observations for every scan in the schedule.

        Each scheduled scan becomes one session.  The boresight Az / El motion
        is simulated once per session and re-used by all observations in that
        session.  Observations are distributed across process groups weighted
        by their number of samples.

        Args:
            data:  The distributed data container; created observations are
                appended to ``data.obs``.
            detectors (list):  Optional list of detectors, passed through to
                the per-observation telescope construction.

        Returns:
            None

        Raises:
            RuntimeError:  If the schedule is unset or empty, or if el-nods
                are requested without specifying the list of offsets.

        """
        log = Logger.get()
        if self.schedule is None:
            raise RuntimeError(
                "The schedule attribute must be set before calling exec()"
            )

        # Check valid combinations of options

        if (self.elnod_start or self.elnod_end) and len(self.elnods) == 0:
            raise RuntimeError(
                "If simulating elnods, you must specify the list of offsets"
            )

        if len(self.schedule.scans) == 0:
            raise RuntimeError("Schedule has no scans!")

        # Data distribution in the detector and sample directions
        comm = data.comm
        det_ranks = comm.group_size
        samp_ranks = 1
        if self.distribute_time:
            det_ranks = 1
            samp_ranks = comm.group_size

        # Get per-observation telescopes
        obs_tele = self._obs_telescopes(data, det_ranks, detectors)

        # The global start is the beginning of the first scan
        mission_start = self.schedule.scans[0].start

        # Although there is no requirement that the sampling is contiguous from one
        # session to the next, for simulations there is no need to restart the
        # sampling clock for each one.  In order to help with load balancing, we
        # distribute all observations across all sessions among process groups.
        # We distribute these in sequence to minimize the number of boresight
        # scanning calculations need to be done by each group.

        obs_info = list()

        rate = self.telescope.focalplane.sample_rate.to_value(u.Hz)
        incr = 1.0 / rate
        off = 0

        for scan in self.schedule.scans:
            # Align the scan start on the global sample grid, rounding up when
            # the scheduled start falls between samples.
            ffirst = rate * (scan.start - mission_start).total_seconds()
            first = int(ffirst)
            if ffirst - first > 1.0e-3 * incr:
                first += 1
            start = first * incr + mission_start.timestamp()
            ns = 1 + int(rate * (scan.stop.timestamp() - start))
            # The last sample is (ns - 1) sample periods after the observation
            # start, not after the mission start.  Computing it relative to the
            # mission start gave the wrong stop time for every scan after the
            # first.
            stop = (ns - 1) * incr + start

            # The session name is the same as the historical observation name,
            # which allows re-use of previously cached atmosphere sims.
            sname = f"{scan.name}-{scan.scan_indx}-{scan.subscan_indx}"

            for obkey, (obtele, detsets) in obs_tele.items():
                if obkey == "ALL":
                    obs_name = sname
                else:
                    obs_name = f"{sname}_{obkey}"
                obs_info.append(
                    {
                        "name": obs_name,
                        "sname": sname,
                        "obkey": obkey,
                        "scan": scan,
                        "start": start,
                        "stop": stop,
                        "samples": ns,
                        "offset": off,
                    }
                )
            off += ns

        # FIXME:  Re-enable this when using astropy for coordinate transforms.
        # # Ensure that astropy IERS is downloaded
        # astropy_control(max_future=self.schedule.scans[-1].stop)

        # Distribute the sessions uniformly among groups.  We take each scan and
        # weight it by the duration in samples.

        obs_samples = [x["samples"] for x in obs_info]
        groupdist = distribute_discrete(obs_samples, comm.ngroups)

        # Every process group creates its observations

        group_first_obs = groupdist[comm.group][0]
        group_num_obs = groupdist[comm.group][1]

        last_session = None
        for obindx in range(group_first_obs, group_first_obs + group_num_obs):
            scan = obs_info[obindx]["scan"]
            sname = obs_info[obindx]["sname"]
            obs_name = obs_info[obindx]["name"]

            sys_mem_str = memreport(
                msg="(whole node)", comm=data.comm.comm_group, silent=True
            )
            msg = f"Group {data.comm.group} begin observation {obs_name} "
            msg += f"with {sys_mem_str}"
            log.debug_rank(msg, comm=data.comm.comm_group)

            # Simulate the boresight pattern.  If this observation is in the same
            # session as the previous observation, just re-use the pointing.

            if sname != last_session:
                site = self.telescope.site
                (
                    times,
                    az,
                    el,
                    sample_sets,
                    scan_min_az,
                    scan_max_az,
                    scan_min_el,
                    scan_max_el,
                    ival_elnod,
                    ival_scan_leftright,
                    ival_scan_rightleft,
                    ival_throw_leftright,
                    ival_throw_rightleft,
                    ival_turn_leftright,
                    ival_turn_rightleft,
                ) = self._simulate_scanning(
                    site, scan, obs_info[obindx]["samples"], rate, comm, samp_ranks
                )

                # Create weather realization common to all observations in the session
                weather = None
                if self.weather is not None:
                    # Every session has a unique site with unique weather
                    # realization.
                    site = copy.deepcopy(site)
                    mid_time = scan.start + (scan.stop - scan.start) / 2
                    try:
                        weather = SimWeather(
                            time=mid_time,
                            name=self.weather,
                            site_uid=site.uid,
                            realization=self.realization,
                            max_pwv=self.max_pwv,
                            median_weather=self.median_weather,
                        )
                    except RuntimeError:
                        # must be a file
                        weather = SimWeather(
                            time=mid_time,
                            file=self.weather,
                            site_uid=site.uid,
                            realization=self.realization,
                            max_pwv=self.max_pwv,
                            median_weather=self.median_weather,
                        )
                    site.weather = weather

                session = Session(
                    sname,
                    start=datetime.fromtimestamp(times[0]).astimezone(timezone.utc),
                    end=datetime.fromtimestamp(times[-1]).astimezone(timezone.utc),
                )

            # Create the observation

            obtele, detsets = obs_tele[obs_info[obindx]["obkey"]]

            # Instantiate new telescope with site that may be unique to this session
            telescope = Telescope(
                obtele.name,
                uid=obtele.uid,
                focalplane=obtele.focalplane,
                site=site,
            )

            ob = Observation(
                comm,
                telescope,
                len(times),
                name=obs_name,
                uid=name_UID(obs_name),
                session=session,
                detector_sets=detsets,
                process_rows=det_ranks,
                sample_sets=sample_sets,
            )

            # Scan limits
            ob["scan_el"] = scan.el  # Nominal elevation
            ob["scan_min_az"] = scan_min_az * u.radian
            ob["scan_max_az"] = scan_max_az * u.radian
            ob["scan_min_el"] = scan_min_el * u.radian
            ob["scan_max_el"] = scan_max_el * u.radian

            # Create and set shared objects for timestamps, position, velocity, and
            # boresight.

            ob.shared.create_column(
                self.times,
                shape=(ob.n_local_samples,),
                dtype=np.float64,
            )
            ob.shared.create_column(
                self.position,
                shape=(ob.n_local_samples, 3),
                dtype=np.float64,
            )
            ob.shared.create_column(
                self.velocity,
                shape=(ob.n_local_samples, 3),
                dtype=np.float64,
            )
            ob.shared.create_column(
                self.azimuth,
                shape=(ob.n_local_samples,),
                dtype=np.float64,
            )
            ob.shared.create_column(
                self.elevation,
                shape=(ob.n_local_samples,),
                dtype=np.float64,
            )
            ob.shared.create_column(
                self.boresight_azel,
                shape=(ob.n_local_samples, 4),
                dtype=np.float64,
            )
            ob.shared.create_column(
                self.boresight_radec,
                shape=(ob.n_local_samples, 4),
                dtype=np.float64,
            )

            # Optionally initialize detector data.  Note that the
            # detectors in each observation have already been pruned
            # during the splitting.

            if self.det_data is not None:
                exists_data = ob.detdata.ensure(
                    self.det_data,
                    dtype=np.float64,
                    create_units=self.det_data_units,
                )

            if self.det_flags is not None:
                exists_flags = ob.detdata.ensure(
                    self.det_flags,
                    dtype=np.uint8,
                )

            # Only the first rank of the process grid columns sets / computes these.

            if sname != last_session:
                stamps = None
                position = None
                velocity = None
                az_data = None
                el_data = None
                bore_azel = None
                bore_radec = None

                if ob.comm_col_rank == 0:
                    stamps = times[
                        ob.local_index_offset : ob.local_index_offset
                        + ob.n_local_samples
                    ]
                    az_data = az[
                        ob.local_index_offset : ob.local_index_offset
                        + ob.n_local_samples
                    ]
                    el_data = el[
                        ob.local_index_offset : ob.local_index_offset
                        + ob.n_local_samples
                    ]
                    # Get the motion of the site for these times.
                    position, velocity = site.position_velocity(stamps)
                    # Convert Az / El to quaternions.  Remember that the azimuth is
                    # measured clockwise and the longitude counter-clockwise.  We define
                    # the focalplane coordinate X-axis to be pointed in the direction
                    # of decreasing elevation.
                    bore_azel = qa.from_lonlat_angles(
                        -(az_data), el_data, np.zeros_like(el_data)
                    )

                    if scan.boresight_angle.to_value(u.radian) != 0:
                        zaxis = np.array([0, 0, 1.0])
                        rot = qa.rotation(
                            zaxis, scan.boresight_angle.to_value(u.radian)
                        )
                        bore_azel = qa.mult(bore_azel, rot)
                    # Convert to RA / DEC.  Use pyephem for now.
                    bore_radec = azel_to_radec(
                        site,
                        stamps,
                        bore_azel,
                        use_ephem=self.use_ephem,
                        use_qpoint=self.use_qpoint,
                    )

            # These set calls communicate from the first rank of each process
            # column; on a repeated session the previously computed buffers are
            # re-used.
            ob.shared[self.times].set(stamps, offset=(0,), fromrank=0)
            ob.shared[self.azimuth].set(az_data, offset=(0,), fromrank=0)
            ob.shared[self.elevation].set(el_data, offset=(0,), fromrank=0)
            ob.shared[self.position].set(position, offset=(0, 0), fromrank=0)
            ob.shared[self.velocity].set(velocity, offset=(0, 0), fromrank=0)
            ob.shared[self.boresight_azel].set(bore_azel, offset=(0, 0), fromrank=0)
            ob.shared[self.boresight_radec].set(bore_radec, offset=(0, 0), fromrank=0)

            # Simulate HWP angle

            simulate_hwp_response(
                ob,
                ob_time_key=self.times,
                ob_angle_key=self.hwp_angle,
                ob_mueller_key=None,
                hwp_start=obs_info[obindx]["start"] * u.second,
                hwp_rpm=self.hwp_rpm,
                hwp_step=self.hwp_step,
                hwp_step_time=self.hwp_step_time,
            )

            # Create interval lists for our motion.  Since we simulated the scan on
            # every process, we don't need to communicate the global timespans of the
            # intervals (using create or create_col).  We can just create them directly.

            ob.intervals[self.throw_leftright_interval] = IntervalList(
                ob.shared[self.times], timespans=ival_throw_leftright
            )
            ob.intervals[self.throw_rightleft_interval] = IntervalList(
                ob.shared[self.times], timespans=ival_throw_rightleft
            )
            ob.intervals[self.throw_interval] = (
                ob.intervals[self.throw_leftright_interval]
                | ob.intervals[self.throw_rightleft_interval]
            )
            ob.intervals[self.scan_leftright_interval] = IntervalList(
                ob.shared[self.times], timespans=ival_scan_leftright
            )
            ob.intervals[self.turn_leftright_interval] = IntervalList(
                ob.shared[self.times], timespans=ival_turn_leftright
            )
            ob.intervals[self.scan_rightleft_interval] = IntervalList(
                ob.shared[self.times], timespans=ival_scan_rightleft
            )
            ob.intervals[self.turn_rightleft_interval] = IntervalList(
                ob.shared[self.times], timespans=ival_turn_rightleft
            )
            ob.intervals[self.elnod_interval] = IntervalList(
                ob.shared[self.times], timespans=ival_elnod
            )
            ob.intervals[self.scanning_interval] = (
                ob.intervals[self.scan_leftright_interval]
                | ob.intervals[self.scan_rightleft_interval]
            )
            ob.intervals[self.turnaround_interval] = (
                ob.intervals[self.turn_leftright_interval]
                | ob.intervals[self.turn_rightleft_interval]
            )

            # Get the Sun's position in horizontal coordinates and define
            # "Sun up" and "Sun close" intervals according to it

            add_solar_intervals(
                ob.intervals,
                site,
                ob.shared[self.times],
                ob.shared[self.azimuth].data,
                ob.shared[self.elevation].data,
                self.sun_up_interval,
                self.sun_close_interval,
                self.sun_close_distance,
            )

            msg = f"Group {data.comm.group} finished observation {obs_name}:\n"
            msg += f"{ob}"
            log.verbose_rank(msg, comm=data.comm.comm_group)

            obmem = ob.memory_use()
            obmem_gb = obmem / 1024**3
            msg = f"Observation {ob.name} using {obmem_gb:0.2f} GB of total memory"
            log.debug_rank(msg, comm=ob.comm.comm_group)

            data.obs.append(ob)
            last_session = sname

        # For convenience, we additionally create a shared flag field with bits set
        # according to the different intervals.  This basically just saves workflows
        # from calling the FlagIntervals operator themselves.  Here we set the bits
        # according to what was done in toast2, so the scanning interval has no bits
        # set.

        flag_intervals = FlagIntervals(
            shared_flags=self.shared_flags,
            shared_flag_bytes=1,
            view_mask=[
                (self.turnaround_interval, self.turnaround_mask),
                (self.sun_up_interval, self.sun_up_mask),
                (self.sun_close_interval, self.sun_close_mask),
                (self.elnod_interval, self.elnod_mask),
            ],
        )
        flag_intervals.apply(data, detectors=None)

    def _simulate_scanning(self, site, scan, n_samples, rate, comm, samp_ranks):
        """Simulate the boresight Az/El pointing for one session."""
        log = Logger.get()

        # Currently, El nods happen before or after the formal scan start / end.
        # This means that we don't know ahead of time the total number of samples
        # in the observation.  That in turn means we cannot create the observation
        # until after we simulate the motion, and therefore we do not yet have the
        # the process grid established.  Normally only rank zero of each grid
        # column would compute and store this data in shared memory.  However, since
        # we do not have that grid yet, every process simulates the scan.  This
        # should be relatively cheap.

        incr = 1.0 / rate

        # Track the az / el range of all motion during this scan, including
        # el nods and any el modulation / steps.  These will be stored as
        # observation metadata after the simulation.
        scan_min_el = scan.el.to_value(u.radian)
        scan_max_el = scan_min_el
        scan_min_az = scan.az_min.to_value(u.radian)
        scan_max_az = scan.az_max.to_value(u.radian)

        # Time range of the science scans
        start_time = scan.start
        stop_time = start_time + timedelta(seconds=(float(n_samples - 1) / rate))

        # The total simulated scan data (including el nods)
        times = list()
        az = list()
        el = list()

        # The time ranges we will build up to construct intervals later
        ival_elnod = list()
        ival_scan_leftright = None
        ival_turn_leftright = None
        ival_scan_rightleft = None
        ival_turn_rightleft = None

        # Compute relative El Nod steps
        elnod_el = None
        elnod_az = None
        if len(self.elnods) > 0:
            elnod_el = np.array([(scan.el + x).to_value(u.radian) for x in self.elnods])
            elnod_az = np.zeros_like(elnod_el) + scan.az_min.to_value(u.radian)

        # Sample sets.  Although Observations support data distribution in any
        # shape process grid, this operator only supports 2 cases:  distributing
        # by detector and distributing by time.  We want to ensure that

        sample_sets = list()

        # Do starting El nod.  We do this before the start of the scheduled scan.
        if self.elnod_start:
            (
                elnod_times,
                elnod_az_data,
                elnod_el_data,
                scan_min_az,
                scan_max_az,
                scan_min_el,
                scan_max_el,
            ) = simulate_elnod(
                scan.start.timestamp(),
                rate,
                scan.az_min.to_value(u.radian),
                scan.el.to_value(u.radian),
                self.scan_rate_az.to_value(u.radian / u.second),
                self.scan_accel_az.to_value(u.radian / u.second**2),
                self.scan_rate_el.to_value(u.radian / u.second),
                self.scan_accel_el.to_value(u.radian / u.second**2),
                elnod_el,
                elnod_az,
                scan_min_az,
                scan_max_az,
                scan_min_el,
                scan_max_el,
            )
            if len(elnod_times) > 0:
                # Shift these elnod times so that they end one sample before the
                # start of the scan.
                sample_sets.append([len(elnod_times)])
                t_elnod = elnod_times[-1] - elnod_times[0]
                elnod_times -= t_elnod + incr
                times.append(elnod_times)
                az.append(elnod_az_data)
                el.append(elnod_el_data)
                ival_elnod.append((elnod_times[0], elnod_times[-1]))

        # Now do the main scan
        (
            scan_times,
            scan_az_data,
            scan_el_data,
            scan_min_az,
            scan_max_az,
            ival_scan_leftright,
            ival_turn_leftright,
            ival_scan_rightleft,
            ival_turn_rightleft,
            ival_throw_leftright,
            ival_throw_rightleft,
        ) = simulate_ces_scan(
            site,
            start_time.timestamp(),
            stop_time.timestamp(),
            rate,
            scan.el.to_value(u.radian),
            scan.az_min.to_value(u.radian),
            scan.az_max.to_value(u.radian),
            scan.az_min.to_value(u.radian),
            self.scan_rate_az.to_value(u.radian / u.second),
            self.fix_rate_on_sky,
            self.scan_accel_az.to_value(u.radian / u.second**2),
            scan_min_az,
            scan_max_az,
            cosecant_modulation=self.scan_cosecant_modulation,
            randomize_phase=self.randomize_phase,
            track_azimuth=self.track_azimuth,
        )

        # Do any adjustments to the El motion
        if self.el_mod_rate.to_value(u.Hz) > 0:
            scan_min_el, scan_max_el = oscillate_el(
                scan_times,
                scan_el_data,
                self.scan_rate_el.to_value(u.radian / u.second),
                self.scan_accel_el.to_value(u.radian / u.second**2),
                scan_min_el,
                scan_max_el,
                self.el_mod_amplitude.to_value(u.radian),
                self.el_mod_rate.to_value(u.Hz),
                el_mod_sine=self.el_mod_sine,
            )
        if self.el_mod_step.to_value(u.radian) > 0:
            scan_min_el, scan_max_el = step_el(
                scan_times,
                scan_az_data,
                scan_el_data,
                self.scan_rate_el.to_value(u.radian / u.second),
                self.scan_accel_el.to_value(u.radian / u.second**2),
                scan_min_el,
                scan_max_el,
                self.el_mod_step.to_value(u.radian),
            )

        # When distributing data, ensure that each process has a whole number of
        # complete scans.
        scan_indices = np.searchsorted(
            scan_times, [x[0] for x in ival_scan_leftright], side="left"
        )
        sample_sets.extend([[x] for x in scan_indices[1:] - scan_indices[:-1]])
        remainder = len(scan_times) - scan_indices[-1]
        if remainder > 0:
            sample_sets.append([remainder])

        times.append(scan_times)
        az.append(scan_az_data)
        el.append(scan_el_data)

        # FIXME:  The CES scan simulation above ends abruptly.  We should implement
        # a deceleration to zero in Az here before doing the final el nod.

        # Do ending El nod.  Start this one sample after the science scan.
        if self.elnod_end:
            (
                elnod_times,
                elnod_az_data,
                elnod_el_data,
                scan_min_az,
                scan_max_az,
                scan_min_el,
                scan_max_el,
            ) = simulate_elnod(
                scan_times[-1] + incr,
                rate,
                scan_az_data[-1],
                scan_el_data[-1],
                self.scan_rate_az.to_value(u.radian / u.second),
                self.scan_accel_az.to_value(u.radian / u.second**2),
                self.scan_rate_el.to_value(u.radian / u.second),
                self.scan_accel_el.to_value(u.radian / u.second**2),
                elnod_el,
                elnod_az,
                scan_min_az,
                scan_max_az,
                scan_min_el,
                scan_max_el,
            )
            if len(elnod_times) > 0:
                sample_sets.append([len(elnod_times)])
                times.append(elnod_times)
                az.append(elnod_az_data)
                el.append(elnod_el_data)
                ival_elnod.append((elnod_times[0], elnod_times[-1]))

        times = np.hstack(times)
        az = np.hstack(az)
        el = np.hstack(el)

        # If we are distributing by time, ensure we have enough sample sets for the
        # number of processes.
        if self.distribute_time:
            if samp_ranks > len(sample_sets):
                if comm.group_rank == 0:
                    msg = f"Group {comm.group} with {comm.group_size} processes cannot distribute {len(sample_sets)} sample sets."
                    log.error(msg)
                    raise RuntimeError(msg)

        return (
            times,
            az,
            el,
            sample_sets,
            scan_min_az,
            scan_max_az,
            scan_min_el,
            scan_max_el,
            ival_elnod,
            ival_scan_leftright,
            ival_scan_rightleft,
            ival_throw_leftright,
            ival_throw_rightleft,
            ival_turn_leftright,
            ival_turn_rightleft,
        )

    def _obs_telescopes(self, data, det_ranks, detectors):
        """Split our session telescope by focalplane key.

        Args:
            data (Data):  The toast Data object (used for the group communicator
                when reporting distribution errors).
            det_ranks (int):  Size of the process grid in the detector direction.
            detectors (list):  Optional list of detector names to restrict to.

        Returns:
            (dict):  Mapping from split key (or "ALL") to a tuple of
                (Telescope, detector sets) for each observation.

        """
        log = Logger.get()

        session_tele_name = self.telescope.name
        session_tele_uid = self.telescope.uid
        site = self.telescope.site
        session_fp = self.telescope.focalplane

        if self.session_split_key is None and detectors is None:
            # All one observation and all detectors.
            if self.detset_key is None:
                detsets = None
            else:
                detsets = session_fp.detector_groups(self.detset_key)
            return {"ALL": (self.telescope, detsets)}

        # First cut the table down to only the detectors we are considering
        fp_input = session_fp.detector_data
        if detectors is None:
            keep_rows = [True] * len(fp_input)
        else:
            dcheck = set(detectors)
            keep_rows = [x in dcheck for x in fp_input["name"]]

        session_keys = np.unique(fp_input[self.session_split_key])

        obs_tele = dict()
        for key in session_keys:
            # Rows matching this split value, restricted to the kept detectors.
            fp_rows = np.logical_and(
                fp_input[self.session_split_key] == key,
                keep_rows,
            )
            fp_detdata = QTable(fp_input[fp_rows])

            fp = Focalplane(
                detector_data=fp_detdata,
                sample_rate=session_fp.sample_rate,
                field_of_view=session_fp.field_of_view,
                thinfp=session_fp.thinfp,
            )

            # List of detectors in this pipeline
            if detectors is None:
                pipedets = fp.detectors
            else:
                check_det = set(detectors)
                pipedets = [det for det in fp.detectors if det in check_det]

            # Group by detector sets
            if self.detset_key is None:
                detsets = None
            else:
                dsets = fp.detector_groups(self.detset_key)
                if detectors is None:
                    detsets = dsets
                else:
                    # Prune to include only the detectors we are using.
                    pipe_check = set(pipedets)
                    detsets = {
                        k: [d for d in v if d in pipe_check]
                        for k, v in dsets.items()
                    }

            # Verify that we have enough detector data for all of our processes.  If we
            # are distributing by time, we check the sample sets on a per-observation
            # basis later.  If we are distributing by detector, we must have at least
            # one detector set for each process.

            if not self.distribute_time:
                # distributing by detector...
                if detsets is None:
                    # Every detector is independently distributed
                    n_detset = len(pipedets)
                else:
                    n_detset = len(detsets)
                if det_ranks > n_detset:
                    if data.comm.group_rank == 0:
                        msg = f"Group {data.comm.group} with {data.comm.group_size}"
                        msg += f" processes cannot distribute {n_detset} detector sets."
                        log.error(msg)
                        raise RuntimeError(msg)

            # Key values may contain spaces; make a name-safe version.
            safe_key = str(key).replace(" ", "-")
            tele_name = f"{session_tele_name}_{safe_key}"
            obs_tele[safe_key] = (
                Telescope(
                    tele_name,
                    uid=session_tele_uid,
                    focalplane=fp,
                    site=site,
                ),
                detsets,
            )
        return obs_tele

    def _finalize(self, data, **kwargs):
        return

    def _requires(self):
        return dict()

    def _provides(self):
        prov = {
            "shared": [
                self.times,
                self.shared_flags,
                self.azimuth,
                self.elevation,
                self.boresight_azel,
                self.boresight_radec,
                self.hwp_angle,
                self.position,
                self.velocity,
            ],
            "detdata": list(),
            "intervals": [
                self.scanning_interval,
                self.turnaround_interval,
                self.scan_leftright_interval,
                self.scan_rightleft_interval,
                self.turn_leftright_interval,
                self.turn_rightleft_interval,
                self.throw_interval,
                self.throw_leftright_interval,
                self.throw_rightleft_interval,
            ],
        }
        if self.det_data is not None:
            prov["detdata"].append(self.det_data)
        if self.det_flags is not None:
            prov["detdata"].append(self.det_flags)
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

azimuth = Unicode(defaults.azimuth, help='Observation shared key for Azimuth') class-attribute instance-attribute

boresight_azel = Unicode(defaults.boresight_azel, help='Observation shared key for boresight AZ/EL') class-attribute instance-attribute

boresight_radec = Unicode(defaults.boresight_radec, help='Observation shared key for boresight RA/DEC') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, allow_none=True, help='Observation detdata key to initialize') class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

det_flags = Unicode(defaults.det_flags, allow_none=True, help='Observation detdata key for flags to initialize') class-attribute instance-attribute

detset_key = Unicode(None, allow_none=True, help='If specified, use this column of the focalplane detector_data to group detectors') class-attribute instance-attribute

distribute_time = Bool(False, help='Distribute observation data along the time axis rather than detector axis') class-attribute instance-attribute

el_mod_amplitude = Quantity(1.0 * u.degree, help='Range of elevation modulation') class-attribute instance-attribute

el_mod_rate = Quantity(0.0 * u.Hz, help='Modulate elevation continuously at this rate') class-attribute instance-attribute

el_mod_sine = Bool(False, help='Modulate elevation with a sine wave instead of a triangle wave') class-attribute instance-attribute

el_mod_step = Quantity(0.0 * u.degree, help='Amount to step elevation after each left-right scan pair') class-attribute instance-attribute

elevation = Unicode(defaults.elevation, help='Observation shared key for Elevation') class-attribute instance-attribute

elnod_end = Bool(False, help='Perform an el-nod after the scan') class-attribute instance-attribute

elnod_every_scan = Bool(False, help='Perform el nods every scan') class-attribute instance-attribute

elnod_interval = Unicode(defaults.elnod_interval, help='Interval name for elnods') class-attribute instance-attribute

elnod_mask = Int(defaults.shared_mask_irregular, help='Bit mask to raise elevation nod flags with') class-attribute instance-attribute

elnod_start = Bool(False, help='Perform an el-nod before the scan') class-attribute instance-attribute

elnods = List([], help='List of relative el_nods') class-attribute instance-attribute

fix_rate_on_sky = Bool(True, help='If True, `scan_rate_az` is given in sky coordinates and azimuthal rate on mount will be adjusted to meet it. If False, `scan_rate_az` is used as the mount azimuthal rate.') class-attribute instance-attribute

hwp_angle = Unicode(None, allow_none=True, help='Observation shared key for HWP angle') class-attribute instance-attribute

hwp_rpm = Float(None, allow_none=True, help='The rate (in RPM) of the HWP rotation') class-attribute instance-attribute

hwp_step = Quantity(None, allow_none=True, help='For stepped HWP, the angle of each step') class-attribute instance-attribute

hwp_step_time = Quantity(None, allow_none=True, help='For stepped HWP, the time between steps') class-attribute instance-attribute

max_pwv = Quantity(None, allow_none=True, help='Maximum PWV for the simulated weather.') class-attribute instance-attribute

median_weather = Bool(False, help='Use median weather parameters instead of sampling from the distributions') class-attribute instance-attribute

position = Unicode(defaults.position, help='Observation shared key for position') class-attribute instance-attribute

randomize_phase = Bool(False, help='If True, the Constant Elevation Scan will begin at a randomized phase.') class-attribute instance-attribute

realization = Int(0, help='The realization index') class-attribute instance-attribute

scan_accel_az = Quantity(1.0 * u.degree / u.second ** 2, help='Mount scanning rate acceleration for turnarounds') class-attribute instance-attribute

scan_accel_el = Quantity(1.0 * u.degree / u.second ** 2, allow_none=True, help='Mount elevation rate acceleration.') class-attribute instance-attribute

scan_cosecant_modulation = Bool(False, help='Modulate the scan rate according to 1/sin(az) for uniform depth') class-attribute instance-attribute

scan_leftright_interval = Unicode(defaults.scan_leftright_interval, help='Interval name for left to right scans') class-attribute instance-attribute

scan_rate_az = Quantity(1.0 * u.degree / u.second, help='The sky or mount azimuth scanning rate. See `fix_rate_on_sky`') class-attribute instance-attribute

scan_rate_el = Quantity(1.0 * u.degree / u.second, allow_none=True, help='The sky elevation scanning rate') class-attribute instance-attribute

scan_rightleft_interval = Unicode(defaults.scan_rightleft_interval, help='Interval name for right to left scans') class-attribute instance-attribute

scanning_interval = Unicode(defaults.scanning_interval, help='Interval name for scanning') class-attribute instance-attribute

schedule = Instance(klass=GroundSchedule, allow_none=True, help='Instance of a GroundSchedule') class-attribute instance-attribute

session_split_key = Unicode(None, allow_none=True, help='Focalplane key for splitting into observations') class-attribute instance-attribute

shared_flags = Unicode(defaults.shared_flags, allow_none=True, help='Observation shared key for common flags') class-attribute instance-attribute

sun_angle_min = Quantity(90.0 * u.degree, help='Minimum angular distance for the scan and the Sun') class-attribute instance-attribute

sun_close_distance = Quantity(45.0 * u.degree, help="'Sun close' flagging distance") class-attribute instance-attribute

sun_close_interval = Unicode(defaults.sun_close_interval, help='Interval name for times when the sun is close') class-attribute instance-attribute

sun_close_mask = Int(defaults.shared_mask_sun_close, help='Bit mask to raise Sun close flags with') class-attribute instance-attribute

sun_up_interval = Unicode(defaults.sun_up_interval, help='Interval name for times when the sun is up') class-attribute instance-attribute

sun_up_mask = Int(defaults.shared_mask_sun_up, help='Bit mask to raise Sun up flags with') class-attribute instance-attribute

telescope = Instance(klass=Telescope, allow_none=True, help='This must be an instance of a Telescope') class-attribute instance-attribute

throw_interval = Unicode(defaults.throw_interval, help='Interval name for scan + turnaround intervals') class-attribute instance-attribute

throw_leftright_interval = Unicode(defaults.throw_leftright_interval, help='Interval name for left to right scans + turnarounds') class-attribute instance-attribute

throw_rightleft_interval = Unicode(defaults.throw_rightleft_interval, help='Interval name for right to left scans + turnarounds') class-attribute instance-attribute

times = Unicode(defaults.times, help='Observation shared key for timestamps') class-attribute instance-attribute

track_azimuth = Bool(False, help='If True, the azimuth throw is continually adjusted to center the field.') class-attribute instance-attribute

turn_leftright_interval = Unicode(defaults.turn_leftright_interval, help='Interval name for turnarounds after left to right scans') class-attribute instance-attribute

turn_rightleft_interval = Unicode(defaults.turn_rightleft_interval, help='Interval name for turnarounds after right to left scans') class-attribute instance-attribute

turnaround_interval = Unicode(defaults.turnaround_interval, help='Interval name for turnarounds') class-attribute instance-attribute

turnaround_mask = Int(defaults.shared_mask_unstable_scanrate, help='Bit mask to raise turnaround flags with') class-attribute instance-attribute

use_ephem = Bool(True, help='Use PyEphem to convert between horizontal and equatorial systems') class-attribute instance-attribute

use_qpoint = Bool(False, help='Use qpoint to convert between horizontal and equatorial systems') class-attribute instance-attribute

velocity = Unicode(defaults.velocity, help='Observation shared key for velocity') class-attribute instance-attribute

weather = Unicode(None, allow_none=True, help="Name of built-in weather site (e.g. 'atacama', 'south_pole') or path to HDF5 file") class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/sim_ground.py
405
406
def __init__(self, **kwargs):
    """Forward all trait settings to the Operator base class constructor."""
    super().__init__(**kwargs)

_check_hwp_angle(proposal)

Source code in toast/ops/sim_ground.py
362
363
364
365
366
367
368
369
370
371
372
373
@traitlets.validate("hwp_angle")
def _check_hwp_angle(self, proposal):
    """Keep the HWP angle key consistent with the HWP motion parameters.

    A shared angle key requires rotation or stepping parameters, and vice
    versa.
    """
    hwp_angle = proposal["value"]
    have_motion = self.hwp_rpm is not None or self.hwp_step is not None
    if hwp_angle is None and have_motion:
        raise traitlets.TraitError(
            "Cannot simulate HWP without a shared data key"
        )
    if hwp_angle is not None and not have_motion:
        raise traitlets.TraitError("Cannot simulate HWP without parameters")
    return hwp_angle

_check_hwp_rpm(proposal)

Source code in toast/ops/sim_ground.py
375
376
377
378
379
380
381
382
383
384
385
386
387
388
@traitlets.validate("hwp_rpm")
def _check_hwp_rpm(self, proposal):
    """Validate the proposed HWP rotation rate against the other HWP traits."""
    hwp_rpm = proposal["value"]
    if hwp_rpm is None:
        # No rotation: an angle key with neither rotation nor stepping is invalid.
        if self.hwp_angle is not None and self.hwp_step is None:
            raise traitlets.TraitError("Cannot simulate HWP without parameters")
        return hwp_rpm
    if self.hwp_angle is None:
        raise traitlets.TraitError(
            "Cannot simulate rotating HWP without a shared data key"
        )
    if self.hwp_step is not None:
        raise traitlets.TraitError("HWP cannot rotate *and* step.")
    return hwp_rpm

_check_hwp_step(proposal)

Source code in toast/ops/sim_ground.py
390
391
392
393
394
395
396
397
398
399
400
401
402
403
@traitlets.validate("hwp_step")
def _check_hwp_step(self, proposal):
    """Validate the proposed HWP step angle against the other HWP traits."""
    hwp_step = proposal["value"]
    if hwp_step is None:
        # No stepping: an angle key with neither stepping nor rotation is invalid.
        if self.hwp_angle is not None and self.hwp_rpm is None:
            raise traitlets.TraitError("Cannot simulate HWP without parameters")
        return hwp_step
    if self.hwp_angle is None:
        raise traitlets.TraitError(
            "Cannot simulate stepped HWP without a shared data key"
        )
    if self.hwp_rpm is not None:
        raise traitlets.TraitError("HWP cannot rotate *and* step.")
    return hwp_step

_check_schedule(proposal)

Source code in toast/ops/sim_ground.py
350
351
352
353
354
355
356
357
358
@traitlets.validate("schedule")
def _check_schedule(self, proposal):
    """Require the schedule trait to be a GroundSchedule instance (or None)."""
    sch = proposal["value"]
    if sch is not None and not isinstance(sch, GroundSchedule):
        raise traitlets.TraitError(
            "schedule must be an instance of a GroundSchedule"
        )
    return sch

_check_telescope(proposal)

Source code in toast/ops/sim_ground.py
318
319
320
321
322
323
324
325
326
327
328
@traitlets.validate("telescope")
def _check_telescope(self, proposal):
    """Require the telescope trait to provide a focalplane with detectors.

    The check probes `tele.focalplane.detectors`; any failure (missing
    attribute, uninitialized focalplane, etc.) is reported as a TraitError.
    """
    tele = proposal["value"]
    if tele is not None:
        try:
            # Probe the attribute chain; the value itself is not needed.
            tele.focalplane.detectors
        except Exception:
            raise traitlets.TraitError(
                "telescope must be a Telescope instance with a focalplane"
            )
    return tele

_check_use_ephem(proposal)

Source code in toast/ops/sim_ground.py
330
331
332
333
334
335
336
@traitlets.validate("use_ephem")
def _check_use_ephem(self, proposal):
    """Disallow enabling ephem when qpoint is already selected."""
    use_ephem = proposal["value"]
    if use_ephem and self.use_qpoint:
        raise traitlets.TraitError("Cannot use both ephem and qpoint")
    return use_ephem

_check_use_qpoint(proposal)

Source code in toast/ops/sim_ground.py
338
339
340
341
342
343
344
345
346
347
348
@traitlets.validate("use_qpoint")
def _check_use_qpoint(self, proposal):
    """Disallow qpoint together with ephem, and verify qpoint is importable."""
    use_qpoint = proposal["value"]
    if not use_qpoint:
        return use_qpoint
    if self.use_ephem:
        raise traitlets.TraitError("Cannot use both ephem and qpoint")
    try:
        import qpoint
    except ModuleNotFoundError as e:
        raise RuntimeError(f"Cannot use qpoint: '{e}'")
    return use_qpoint

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/sim_ground.py
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    log = Logger.get()
    if self.schedule is None:
        raise RuntimeError(
            "The schedule attribute must be set before calling exec()"
        )

    # Check valid combinations of options

    if (self.elnod_start or self.elnod_end) and len(self.elnods) == 0:
        raise RuntimeError(
            "If simulating elnods, you must specify the list of offsets"
        )

    if len(self.schedule.scans) == 0:
        raise RuntimeError("Schedule has no scans!")

    # Data distribution in the detector and sample directions
    comm = data.comm
    det_ranks = comm.group_size
    samp_ranks = 1
    if self.distribute_time:
        det_ranks = 1
        samp_ranks = comm.group_size

    # Get per-observation telescopes
    obs_tele = self._obs_telescopes(data, det_ranks, detectors)

    # The global start is the beginning of the first scan
    mission_start = self.schedule.scans[0].start

    # Although there is no requirement that the sampling is contiguous from one
    # session to the next, for simulations there is no need to restart the
    # sampling clock for each one.  In order to help with load balancing, we
    # distribute all observations across all sessions among process groups.
    # We distribute these in sequence to minimize the number of boresight
    # scanning calculations need to be done by each group.

    obs_info = list()

    rate = self.telescope.focalplane.sample_rate.to_value(u.Hz)
    incr = 1.0 / rate
    off = 0

    for scan in self.schedule.scans:
        ffirst = rate * (scan.start - mission_start).total_seconds()
        first = int(ffirst)
        if ffirst - first > 1.0e-3 * incr:
            first += 1
        start = first * incr + mission_start.timestamp()
        ns = 1 + int(rate * (scan.stop.timestamp() - start))
        stop = (ns - 1) * incr + mission_start.timestamp()

        # The session name is the same as the historical observation name,
        # which allows re-use of previously cached atmosphere sims.
        sname = f"{scan.name}-{scan.scan_indx}-{scan.subscan_indx}"

        for obkey, (obtele, detsets) in obs_tele.items():
            if obkey == "ALL":
                obs_name = sname
            else:
                obs_name = f"{sname}_{obkey}"
            obs_info.append(
                {
                    "name": obs_name,
                    "sname": sname,
                    "obkey": obkey,
                    "scan": scan,
                    "start": start,
                    "stop": stop,
                    "samples": ns,
                    "offset": off,
                }
            )
        off += ns

    # FIXME:  Re-enable this when using astropy for coordinate transforms.
    # # Ensure that astropy IERS is downloaded
    # astropy_control(max_future=self.schedule.scans[-1].stop)

    # Distribute the sessions uniformly among groups.  We take each scan and
    # weight it by the duration in samples.

    obs_samples = [x["samples"] for x in obs_info]
    groupdist = distribute_discrete(obs_samples, comm.ngroups)

    # Every process group creates its observations

    group_first_obs = groupdist[comm.group][0]
    group_num_obs = groupdist[comm.group][1]

    last_session = None
    for obindx in range(group_first_obs, group_first_obs + group_num_obs):
        scan = obs_info[obindx]["scan"]
        sname = obs_info[obindx]["sname"]
        obs_name = obs_info[obindx]["name"]

        sys_mem_str = memreport(
            msg="(whole node)", comm=data.comm.comm_group, silent=True
        )
        msg = f"Group {data.comm.group} begin observation {obs_name} "
        msg += f"with {sys_mem_str}"
        log.debug_rank(msg, comm=data.comm.comm_group)

        # Simulate the boresight pattern.  If this observation is in the same
        # session as the previous observation, just re-use the pointing.

        if sname != last_session:
            site = self.telescope.site
            (
                times,
                az,
                el,
                sample_sets,
                scan_min_az,
                scan_max_az,
                scan_min_el,
                scan_max_el,
                ival_elnod,
                ival_scan_leftright,
                ival_scan_rightleft,
                ival_throw_leftright,
                ival_throw_rightleft,
                ival_turn_leftright,
                ival_turn_rightleft,
            ) = self._simulate_scanning(
                site, scan, obs_info[obindx]["samples"], rate, comm, samp_ranks
            )

            # Create weather realization common to all observations in the session
            weather = None
            if self.weather is not None:
                # Every session has a unique site with unique weather
                # realization.
                site = copy.deepcopy(site)
                mid_time = scan.start + (scan.stop - scan.start) / 2
                try:
                    weather = SimWeather(
                        time=mid_time,
                        name=self.weather,
                        site_uid=site.uid,
                        realization=self.realization,
                        max_pwv=self.max_pwv,
                        median_weather=self.median_weather,
                    )
                except RuntimeError:
                    # must be a file
                    weather = SimWeather(
                        time=mid_time,
                        file=self.weather,
                        site_uid=site.uid,
                        realization=self.realization,
                        max_pwv=self.max_pwv,
                        median_weather=self.median_weather,
                    )
                site.weather = weather

            session = Session(
                sname,
                start=datetime.fromtimestamp(times[0]).astimezone(timezone.utc),
                end=datetime.fromtimestamp(times[-1]).astimezone(timezone.utc),
            )

        # Create the observation

        obtele, detsets = obs_tele[obs_info[obindx]["obkey"]]

        # Instantiate new telescope with site that may be unique to this session
        telescope = Telescope(
            obtele.name,
            uid=obtele.uid,
            focalplane=obtele.focalplane,
            site=site,
        )

        ob = Observation(
            comm,
            telescope,
            len(times),
            name=obs_name,
            uid=name_UID(obs_name),
            session=session,
            detector_sets=detsets,
            process_rows=det_ranks,
            sample_sets=sample_sets,
        )

        # Scan limits
        ob["scan_el"] = scan.el  # Nominal elevation
        ob["scan_min_az"] = scan_min_az * u.radian
        ob["scan_max_az"] = scan_max_az * u.radian
        ob["scan_min_el"] = scan_min_el * u.radian
        ob["scan_max_el"] = scan_max_el * u.radian

        # Create and set shared objects for timestamps, position, velocity, and
        # boresight.

        ob.shared.create_column(
            self.times,
            shape=(ob.n_local_samples,),
            dtype=np.float64,
        )
        ob.shared.create_column(
            self.position,
            shape=(ob.n_local_samples, 3),
            dtype=np.float64,
        )
        ob.shared.create_column(
            self.velocity,
            shape=(ob.n_local_samples, 3),
            dtype=np.float64,
        )
        ob.shared.create_column(
            self.azimuth,
            shape=(ob.n_local_samples,),
            dtype=np.float64,
        )
        ob.shared.create_column(
            self.elevation,
            shape=(ob.n_local_samples,),
            dtype=np.float64,
        )
        ob.shared.create_column(
            self.boresight_azel,
            shape=(ob.n_local_samples, 4),
            dtype=np.float64,
        )
        ob.shared.create_column(
            self.boresight_radec,
            shape=(ob.n_local_samples, 4),
            dtype=np.float64,
        )

        # Optionally initialize detector data.  Note that the
        # detectors in each observation have already been pruned
        # during the splitting.

        if self.det_data is not None:
            exists_data = ob.detdata.ensure(
                self.det_data,
                dtype=np.float64,
                create_units=self.det_data_units,
            )

        if self.det_flags is not None:
            exists_flags = ob.detdata.ensure(
                self.det_flags,
                dtype=np.uint8,
            )

        # Only the first rank of the process grid columns sets / computes these.

        if sname != last_session:
            stamps = None
            position = None
            velocity = None
            az_data = None
            el_data = None
            bore_azel = None
            bore_radec = None

            if ob.comm_col_rank == 0:
                stamps = times[
                    ob.local_index_offset : ob.local_index_offset
                    + ob.n_local_samples
                ]
                az_data = az[
                    ob.local_index_offset : ob.local_index_offset
                    + ob.n_local_samples
                ]
                el_data = el[
                    ob.local_index_offset : ob.local_index_offset
                    + ob.n_local_samples
                ]
                # Get the motion of the site for these times.
                position, velocity = site.position_velocity(stamps)
                # Convert Az / El to quaternions.  Remember that the azimuth is
                # measured clockwise and the longitude counter-clockwise.  We define
                # the focalplane coordinate X-axis to be pointed in the direction
                # of decreasing elevation.
                bore_azel = qa.from_lonlat_angles(
                    -(az_data), el_data, np.zeros_like(el_data)
                )

                if scan.boresight_angle.to_value(u.radian) != 0:
                    zaxis = np.array([0, 0, 1.0])
                    rot = qa.rotation(
                        zaxis, scan.boresight_angle.to_value(u.radian)
                    )
                    bore_azel = qa.mult(bore_azel, rot)
                # Convert to RA / DEC.  Use pyephem for now.
                bore_radec = azel_to_radec(
                    site,
                    stamps,
                    bore_azel,
                    use_ephem=self.use_ephem,
                    use_qpoint=self.use_qpoint,
                )

        ob.shared[self.times].set(stamps, offset=(0,), fromrank=0)
        ob.shared[self.azimuth].set(az_data, offset=(0,), fromrank=0)
        ob.shared[self.elevation].set(el_data, offset=(0,), fromrank=0)
        ob.shared[self.position].set(position, offset=(0, 0), fromrank=0)
        ob.shared[self.velocity].set(velocity, offset=(0, 0), fromrank=0)
        ob.shared[self.boresight_azel].set(bore_azel, offset=(0, 0), fromrank=0)
        ob.shared[self.boresight_radec].set(bore_radec, offset=(0, 0), fromrank=0)

        # Simulate HWP angle

        simulate_hwp_response(
            ob,
            ob_time_key=self.times,
            ob_angle_key=self.hwp_angle,
            ob_mueller_key=None,
            hwp_start=obs_info[obindx]["start"] * u.second,
            hwp_rpm=self.hwp_rpm,
            hwp_step=self.hwp_step,
            hwp_step_time=self.hwp_step_time,
        )

        # Create interval lists for our motion.  Since we simulated the scan on
        # every process, we don't need to communicate the global timespans of the
        # intervals (using create or create_col).  We can just create them directly.

        ob.intervals[self.throw_leftright_interval] = IntervalList(
            ob.shared[self.times], timespans=ival_throw_leftright
        )
        ob.intervals[self.throw_rightleft_interval] = IntervalList(
            ob.shared[self.times], timespans=ival_throw_rightleft
        )
        ob.intervals[self.throw_interval] = (
            ob.intervals[self.throw_leftright_interval]
            | ob.intervals[self.throw_rightleft_interval]
        )
        ob.intervals[self.scan_leftright_interval] = IntervalList(
            ob.shared[self.times], timespans=ival_scan_leftright
        )
        ob.intervals[self.turn_leftright_interval] = IntervalList(
            ob.shared[self.times], timespans=ival_turn_leftright
        )
        ob.intervals[self.scan_rightleft_interval] = IntervalList(
            ob.shared[self.times], timespans=ival_scan_rightleft
        )
        ob.intervals[self.turn_rightleft_interval] = IntervalList(
            ob.shared[self.times], timespans=ival_turn_rightleft
        )
        ob.intervals[self.elnod_interval] = IntervalList(
            ob.shared[self.times], timespans=ival_elnod
        )
        ob.intervals[self.scanning_interval] = (
            ob.intervals[self.scan_leftright_interval]
            | ob.intervals[self.scan_rightleft_interval]
        )
        ob.intervals[self.turnaround_interval] = (
            ob.intervals[self.turn_leftright_interval]
            | ob.intervals[self.turn_rightleft_interval]
        )

        # Get the Sun's position in horizontal coordinates and define
        # "Sun up" and "Sun close" intervals according to it

        add_solar_intervals(
            ob.intervals,
            site,
            ob.shared[self.times],
            ob.shared[self.azimuth].data,
            ob.shared[self.elevation].data,
            self.sun_up_interval,
            self.sun_close_interval,
            self.sun_close_distance,
        )

        msg = f"Group {data.comm.group} finished observation {obs_name}:\n"
        msg += f"{ob}"
        log.verbose_rank(msg, comm=data.comm.comm_group)

        obmem = ob.memory_use()
        obmem_gb = obmem / 1024**3
        msg = f"Observation {ob.name} using {obmem_gb:0.2f} GB of total memory"
        log.debug_rank(msg, comm=ob.comm.comm_group)

        data.obs.append(ob)
        last_session = sname

    # For convenience, we additionally create a shared flag field with bits set
    # according to the different intervals.  This basically just saves workflows
    # from calling the FlagIntervals operator themselves.  Here we set the bits
    # according to what was done in toast2, so the scanning interval has no bits
    # set.

    flag_intervals = FlagIntervals(
        shared_flags=self.shared_flags,
        shared_flag_bytes=1,
        view_mask=[
            (self.turnaround_interval, self.turnaround_mask),
            (self.sun_up_interval, self.sun_up_mask),
            (self.sun_close_interval, self.sun_close_mask),
            (self.elnod_interval, self.elnod_mask),
        ],
    )
    flag_intervals.apply(data, detectors=None)

_finalize(data, **kwargs)

Source code in toast/ops/sim_ground.py
1142
1143
def _finalize(self, data, **kwargs):
    # Nothing to clean up: all products were written into `data` during _exec().
    return

_obs_telescopes(data, det_ranks, detectors)

Split our session telescope by focalplane key.

Source code in toast/ops/sim_ground.py
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
def _obs_telescopes(self, data, det_ranks, detectors):
    """Split our session telescope by focalplane key.

    Args:
        data (Data):  The distributed data object (used here only for the
            group communicator when reporting errors).
        det_ranks (int):  The size of the process grid in the detector
            direction.
        detectors (list):  If not None, restrict to this subset of detectors.

    Returns:
        (dict):  Mapping from split-key value (or "ALL") to a tuple of
            (Telescope, detector sets) for the observations of that split.

    """
    log = Logger.get()

    session_tele_name = self.telescope.name
    session_tele_uid = self.telescope.uid
    site = self.telescope.site
    session_fp = self.telescope.focalplane

    if self.session_split_key is None and detectors is None:
        # No splitting and all detectors:  a single observation per session.
        if self.detset_key is None:
            detsets = None
        else:
            detsets = session_fp.detector_groups(self.detset_key)
        return {"ALL": (self.telescope, detsets)}

    # First cut the table down to only the detectors we are considering
    fp_input = session_fp.detector_data
    if detectors is None:
        keep_rows = [True] * len(fp_input)
    else:
        dcheck = set(detectors)
        keep_rows = [x in dcheck for x in fp_input["name"]]

    session_keys = np.unique(fp_input[self.session_split_key])

    obs_tele = dict()
    for key in session_keys:
        # Focalplane rows for this split value, intersected with the
        # requested detector subset.
        fp_rows = np.logical_and(
            fp_input[self.session_split_key] == key,
            keep_rows,
        )
        fp_detdata = QTable(fp_input[fp_rows])

        fp = Focalplane(
            detector_data=fp_detdata,
            sample_rate=session_fp.sample_rate,
            field_of_view=session_fp.field_of_view,
            thinfp=session_fp.thinfp,
        )

        # List of detectors in this pipeline
        if detectors is None:
            pipedets = fp.detectors
        else:
            check_det = set(detectors)
            pipedets = [det for det in fp.detectors if det in check_det]

        # Group by detector sets
        if self.detset_key is None:
            detsets = None
        else:
            dsets = fp.detector_groups(self.detset_key)
            if detectors is None:
                detsets = dsets
            else:
                # Prune to include only the detectors we are using.
                pipe_check = set(pipedets)
                detsets = {
                    k: [d for d in v if d in pipe_check] for k, v in dsets.items()
                }

        # Verify that we have enough detector data for all of our processes.  If we
        # are distributing by time, we check the sample sets on a per-observation
        # basis later.  If we are distributing by detector, we must have at least
        # one detector set for each process.

        if not self.distribute_time:
            # distributing by detector...
            if detsets is None:
                # Every detector is independently distributed
                n_detset = len(pipedets)
            else:
                n_detset = len(detsets)
            if det_ranks > n_detset:
                # NOTE(review):  as elsewhere in this operator, only the first
                # rank of the group logs and raises here -- confirm that the
                # other ranks are expected to fail downstream.
                if data.comm.group_rank == 0:
                    msg = f"Group {data.comm.group} with {data.comm.group_size}"
                    msg += f" processes cannot distribute {n_detset} detector sets."
                    log.error(msg)
                    raise RuntimeError(msg)

        # Use the split value (whitespace-sanitized) to build a unique
        # telescope name for this subset of the focalplane.
        safe_key = str(key).replace(" ", "-")
        tele_name = f"{session_tele_name}_{safe_key}"
        obs_tele[safe_key] = (
            Telescope(
                tele_name,
                uid=session_tele_uid,
                focalplane=fp,
                site=site,
            ),
            detsets,
        )
    return obs_tele

_provides()

Source code in toast/ops/sim_ground.py
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
def _provides(self):
    prov = {
        "shared": [
            self.times,
            self.shared_flags,
            self.azimuth,
            self.elevation,
            self.boresight_azel,
            self.boresight_radec,
            self.hwp_angle,
            self.position,
            self.velocity,
        ],
        "detdata": list(),
        "intervals": [
            self.scanning_interval,
            self.turnaround_interval,
            self.scan_leftright_interval,
            self.scan_rightleft_interval,
            self.turn_leftright_interval,
            self.turn_rightleft_interval,
            self.throw_interval,
            self.throw_leftright_interval,
            self.throw_rightleft_interval,
        ],
    }
    if self.det_data is not None:
        prov["detdata"].append(self.det_data)
    if self.det_flags is not None:
        prov["detdata"].append(self.det_flags)
    return prov

_requires()

Source code in toast/ops/sim_ground.py
1145
1146
def _requires(self):
    return dict()

_simulate_scanning(site, scan, n_samples, rate, comm, samp_ranks)

Simulate the boresight Az/El pointing for one session.

Source code in toast/ops/sim_ground.py
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
def _simulate_scanning(self, site, scan, n_samples, rate, comm, samp_ranks):
    """Simulate the boresight Az/El pointing for one session.

    Builds the complete time / azimuth / elevation streams for one scheduled
    scan, including optional starting and ending El nods and any elevation
    modulation or stepping, and records the time ranges used later to
    construct interval lists.

    Args:
        site:  The observing site, passed through to the scan simulation.
        scan:  One scheduled scan, providing start/stop times, el, az range.
        n_samples (int):  Number of samples in the science portion of the scan.
        rate (float):  The sample rate in Hz.
        comm:  The toast communicator (used only for error reporting here).
        samp_ranks (int):  Size of the process grid in the sample direction.

    Returns:
        (tuple):  (times, az, el, sample_sets, az/el extrema, and the
            elnod / scan / throw / turnaround interval time ranges).

    """
    log = Logger.get()

    # Currently, El nods happen before or after the formal scan start / end.
    # This means that we don't know ahead of time the total number of samples
    # in the observation.  That in turn means we cannot create the observation
    # until after we simulate the motion, and therefore we do not yet have the
    # the process grid established.  Normally only rank zero of each grid
    # column would compute and store this data in shared memory.  However, since
    # we do not have that grid yet, every process simulates the scan.  This
    # should be relatively cheap.

    # Sample period in seconds.
    incr = 1.0 / rate

    # Track the az / el range of all motion during this scan, including
    # el nods and any el modulation / steps.  These will be stored as
    # observation metadata after the simulation.
    scan_min_el = scan.el.to_value(u.radian)
    scan_max_el = scan_min_el
    scan_min_az = scan.az_min.to_value(u.radian)
    scan_max_az = scan.az_max.to_value(u.radian)

    # Time range of the science scans
    start_time = scan.start
    stop_time = start_time + timedelta(seconds=(float(n_samples - 1) / rate))

    # The total simulated scan data (including el nods)
    times = list()
    az = list()
    el = list()

    # The time ranges we will build up to construct intervals later.  The
    # scan / turnaround ranges are filled in by simulate_ces_scan() below.
    ival_elnod = list()
    ival_scan_leftright = None
    ival_turn_leftright = None
    ival_scan_rightleft = None
    ival_turn_rightleft = None

    # Compute relative El Nod steps
    elnod_el = None
    elnod_az = None
    if len(self.elnods) > 0:
        elnod_el = np.array([(scan.el + x).to_value(u.radian) for x in self.elnods])
        elnod_az = np.zeros_like(elnod_el) + scan.az_min.to_value(u.radian)

    # Sample sets.  Although Observations support data distribution in any
    # shape process grid, this operator only supports 2 cases:  distributing
    # by detector and distributing by time.  We want to ensure that any
    # distribution in the time direction splits on whole motion segments, so
    # we accumulate the sample counts of each segment here.

    sample_sets = list()

    # Do starting El nod.  We do this before the start of the scheduled scan.
    if self.elnod_start:
        (
            elnod_times,
            elnod_az_data,
            elnod_el_data,
            scan_min_az,
            scan_max_az,
            scan_min_el,
            scan_max_el,
        ) = simulate_elnod(
            scan.start.timestamp(),
            rate,
            scan.az_min.to_value(u.radian),
            scan.el.to_value(u.radian),
            self.scan_rate_az.to_value(u.radian / u.second),
            self.scan_accel_az.to_value(u.radian / u.second**2),
            self.scan_rate_el.to_value(u.radian / u.second),
            self.scan_accel_el.to_value(u.radian / u.second**2),
            elnod_el,
            elnod_az,
            scan_min_az,
            scan_max_az,
            scan_min_el,
            scan_max_el,
        )
        if len(elnod_times) > 0:
            # Shift these elnod times so that they end one sample before the
            # start of the scan.
            sample_sets.append([len(elnod_times)])
            t_elnod = elnod_times[-1] - elnod_times[0]
            elnod_times -= t_elnod + incr
            times.append(elnod_times)
            az.append(elnod_az_data)
            el.append(elnod_el_data)
            ival_elnod.append((elnod_times[0], elnod_times[-1]))

    # Now do the main scan
    (
        scan_times,
        scan_az_data,
        scan_el_data,
        scan_min_az,
        scan_max_az,
        ival_scan_leftright,
        ival_turn_leftright,
        ival_scan_rightleft,
        ival_turn_rightleft,
        ival_throw_leftright,
        ival_throw_rightleft,
    ) = simulate_ces_scan(
        site,
        start_time.timestamp(),
        stop_time.timestamp(),
        rate,
        scan.el.to_value(u.radian),
        scan.az_min.to_value(u.radian),
        scan.az_max.to_value(u.radian),
        scan.az_min.to_value(u.radian),
        self.scan_rate_az.to_value(u.radian / u.second),
        self.fix_rate_on_sky,
        self.scan_accel_az.to_value(u.radian / u.second**2),
        scan_min_az,
        scan_max_az,
        cosecant_modulation=self.scan_cosecant_modulation,
        randomize_phase=self.randomize_phase,
        track_azimuth=self.track_azimuth,
    )

    # Do any adjustments to the El motion.  These modify scan_el_data in
    # place and may widen the el extrema.
    if self.el_mod_rate.to_value(u.Hz) > 0:
        scan_min_el, scan_max_el = oscillate_el(
            scan_times,
            scan_el_data,
            self.scan_rate_el.to_value(u.radian / u.second),
            self.scan_accel_el.to_value(u.radian / u.second**2),
            scan_min_el,
            scan_max_el,
            self.el_mod_amplitude.to_value(u.radian),
            self.el_mod_rate.to_value(u.Hz),
            el_mod_sine=self.el_mod_sine,
        )
    if self.el_mod_step.to_value(u.radian) > 0:
        scan_min_el, scan_max_el = step_el(
            scan_times,
            scan_az_data,
            scan_el_data,
            self.scan_rate_el.to_value(u.radian / u.second),
            self.scan_accel_el.to_value(u.radian / u.second**2),
            scan_min_el,
            scan_max_el,
            self.el_mod_step.to_value(u.radian),
        )

    # When distributing data, ensure that each process has a whole number of
    # complete scans.  scan_indices marks the first sample of each left-right
    # scan; successive differences give the size of each full scan pair.
    scan_indices = np.searchsorted(
        scan_times, [x[0] for x in ival_scan_leftright], side="left"
    )
    sample_sets.extend([[x] for x in scan_indices[1:] - scan_indices[:-1]])
    remainder = len(scan_times) - scan_indices[-1]
    if remainder > 0:
        sample_sets.append([remainder])

    times.append(scan_times)
    az.append(scan_az_data)
    el.append(scan_el_data)

    # FIXME:  The CES scan simulation above ends abruptly.  We should implement
    # a deceleration to zero in Az here before doing the final el nod.

    # Do ending El nod.  Start this one sample after the science scan.
    if self.elnod_end:
        (
            elnod_times,
            elnod_az_data,
            elnod_el_data,
            scan_min_az,
            scan_max_az,
            scan_min_el,
            scan_max_el,
        ) = simulate_elnod(
            scan_times[-1] + incr,
            rate,
            scan_az_data[-1],
            scan_el_data[-1],
            self.scan_rate_az.to_value(u.radian / u.second),
            self.scan_accel_az.to_value(u.radian / u.second**2),
            self.scan_rate_el.to_value(u.radian / u.second),
            self.scan_accel_el.to_value(u.radian / u.second**2),
            elnod_el,
            elnod_az,
            scan_min_az,
            scan_max_az,
            scan_min_el,
            scan_max_el,
        )
        if len(elnod_times) > 0:
            sample_sets.append([len(elnod_times)])
            times.append(elnod_times)
            az.append(elnod_az_data)
            el.append(elnod_el_data)
            ival_elnod.append((elnod_times[0], elnod_times[-1]))

    # Concatenate the segments into single contiguous arrays.
    times = np.hstack(times)
    az = np.hstack(az)
    el = np.hstack(el)

    # If we are distributing by time, ensure we have enough sample sets for the
    # number of processes.
    if self.distribute_time:
        if samp_ranks > len(sample_sets):
            if comm.group_rank == 0:
                msg = f"Group {comm.group} with {comm.group_size} processes cannot distribute {len(sample_sets)} sample sets."
                log.error(msg)
                raise RuntimeError(msg)

    return (
        times,
        az,
        el,
        sample_sets,
        scan_min_az,
        scan_max_az,
        scan_min_el,
        scan_max_el,
        ival_elnod,
        ival_scan_leftright,
        ival_scan_rightleft,
        ival_throw_leftright,
        ival_throw_rightleft,
        ival_turn_leftright,
        ival_turn_rightleft,
    )

toast.ops.SimSatellite

Bases: Operator

Simulate a generic satellite motion.

This simulates satellite pointing in regular intervals ("science scans") that may have some gaps in between for cooler cycles or other events. The precession axis (anti-sun direction) is continuously slewed.

To be consistent with the ground simulation facilities, the satellite pointing is expressed in the ICRS (equatorial) system by default. Detector pointing expansion can rotate the output pointing to any other reference frame.

Source code in toast/ops/sim_satellite.py
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
@trait_docs
class SimSatellite(Operator):
    """Simulate a generic satellite motion.

    This simulates satellite pointing in regular intervals ("science scans") that
    may have some gaps in between for cooler cycles or other events.  The precession
    axis (anti-sun direction) is continuously slewed.

    To be consistent with the ground simulation facilities, the satellite pointing
    is expressed in the ICRS (equatorial) system by default.  Detector pointing
    expansion can rotate the output pointing to any other reference frame.
    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    # Instrument model and observing schedule.
    telescope = Instance(
        klass=Telescope, allow_none=True, help="This must be an instance of a Telescope"
    )

    schedule = Instance(
        klass=SatelliteSchedule, allow_none=True, help="Instance of a SatelliteSchedule"
    )

    # Scan strategy geometry.
    spin_angle = Quantity(
        30.0 * u.degree, help="The opening angle of the boresight from the spin axis"
    )

    prec_angle = Quantity(
        65.0 * u.degree,
        help="The opening angle of the spin axis from the precession axis",
    )

    # Half-wave plate configuration:  either continuous rotation (hwp_rpm) or
    # discrete stepping (hwp_step / hwp_step_time).  The validators below
    # enforce that the combination of HWP traits is self-consistent.
    hwp_rpm = Float(None, allow_none=True, help="The rate (in RPM) of the HWP rotation")

    hwp_step = Quantity(
        None, allow_none=True, help="For stepped HWP, the angle of each step"
    )

    hwp_step_time = Quantity(
        None, allow_none=True, help="For stepped HWP, the time between steps"
    )

    # Data distribution options.
    distribute_time = Bool(
        False,
        help="Distribute observation data along the time axis rather than detector axis",
    )

    detset_key = Unicode(
        None,
        allow_none=True,
        help="If specified, use this column of the focalplane "
        "detector_data to group detectors",
    )

    # Names of the shared observation fields created by this operator.
    times = Unicode(defaults.times, help="Observation shared key for timestamps")

    shared_flags = Unicode(
        defaults.shared_flags,
        allow_none=True,
        help="Observation shared key for common flags",
    )

    hwp_angle = Unicode(
        None, allow_none=True, help="Observation shared key for HWP angle"
    )

    boresight = Unicode(
        defaults.boresight_radec, help="Observation shared key for boresight"
    )

    coord = Unicode(
        "C", help="Coordinate system to use for pointing. One of ('C', 'E', 'G')"
    )

    position = Unicode(defaults.position, help="Observation shared key for position")

    velocity = Unicode(defaults.velocity, help="Observation shared key for velocity")

    # Names of detector data fields that are optionally initialized.
    det_data = Unicode(
        defaults.det_data,
        allow_none=True,
        help="Observation detdata key to initialize",
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    det_flags = Unicode(
        defaults.det_flags,
        allow_none=True,
        help="Observation detdata key for flags to initialize",
    )

    @traitlets.validate("coord")
    def _check_coord(self, proposal):
        """Validate that the coordinate system is one of 'E', 'C', or 'G'."""
        value = proposal["value"]
        if value is not None and value not in ("E", "C", "G"):
            raise traitlets.TraitError("coordinate system must be 'E', 'C', or 'G'")
        return value

    @traitlets.validate("telescope")
    def _check_telescope(self, proposal):
        """Validate that the telescope trait carries a usable focalplane."""
        tele = proposal["value"]
        if tele is None:
            return tele
        try:
            # Probe the focalplane; any failure means an unusable telescope.
            tele.focalplane.detectors
        except Exception:
            raise traitlets.TraitError(
                "telescope must be a Telescope instance with a focalplane"
            )
        return tele

    @traitlets.validate("schedule")
    def _check_schedule(self, proposal):
        """Validate the type of the schedule trait."""
        sch = proposal["value"]
        if sch is not None and not isinstance(sch, SatelliteSchedule):
            raise traitlets.TraitError(
                "schedule must be an instance of a SatelliteSchedule"
            )
        return sch

    @traitlets.validate("hwp_angle")
    def _check_hwp_angle(self, proposal):
        """Ensure the HWP angle key and HWP motion parameters are set together."""
        hwp_angle = proposal["value"]
        have_params = self.hwp_rpm is not None or self.hwp_step is not None
        if hwp_angle is None and have_params:
            raise traitlets.TraitError(
                "Cannot simulate HWP without a shared data key"
            )
        if hwp_angle is not None and not have_params:
            raise traitlets.TraitError("Cannot simulate HWP without parameters")
        return hwp_angle

    @traitlets.validate("hwp_rpm")
    def _check_hwp_rpm(self, proposal):
        """Check the HWP rotation rate against the other HWP traits."""
        hwp_rpm = proposal["value"]
        if hwp_rpm is None:
            # An angle key with neither rotation nor stepping is inconsistent.
            if self.hwp_angle is not None and self.hwp_step is None:
                raise traitlets.TraitError("Cannot simulate HWP without parameters")
            return hwp_rpm
        if self.hwp_angle is None:
            raise traitlets.TraitError(
                "Cannot simulate rotating HWP without a shared data key"
            )
        if self.hwp_step is not None:
            raise traitlets.TraitError("HWP cannot rotate *and* step.")
        return hwp_rpm

    @traitlets.validate("hwp_step")
    def _check_hwp_step(self, proposal):
        """Check the HWP step angle against the other HWP traits."""
        hwp_step = proposal["value"]
        if hwp_step is None:
            # An angle key with neither stepping nor rotation is inconsistent.
            if self.hwp_angle is not None and self.hwp_rpm is None:
                raise traitlets.TraitError("Cannot simulate HWP without parameters")
            return hwp_step
        if self.hwp_angle is None:
            raise traitlets.TraitError(
                "Cannot simulate stepped HWP without a shared data key"
            )
        if self.hwp_rpm is not None:
            raise traitlets.TraitError("HWP cannot rotate *and* step.")
        return hwp_step

    def __init__(self, **kwargs):
        # Forward all trait values to the Operator base class constructor.
        super().__init__(**kwargs)

    def _get_coord_rot(self):
        """Get an optional coordinate rotation quaternion to return satellite
        pointing and velocity in the user-specified frame
        """
        # "C" (equatorial / ICRS) is the native frame, so no rotation needed.
        # The coord trait validator restricts values to 'C', 'E', or 'G'.
        coord_rot = None
        if self.coord == "E":
            coord_rot = qa.equ2ecl()
        elif self.coord == "G":
            coord_rot = qa.equ2gal()
        return coord_rot

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        zaxis = np.array([0, 0, 1], dtype=np.float64)
        coord_rot = self._get_coord_rot()
        log = Logger.get()
        if self.telescope is None:
            raise RuntimeError(
                "The telescope attribute must be set before calling exec()"
            )
        if self.schedule is None:
            raise RuntimeError(
                "The schedule attribute must be set before calling exec()"
            )
        focalplane = self.telescope.focalplane
        rate = focalplane.sample_rate.to_value(u.Hz)
        site = self.telescope.site
        comm = data.comm

        # List of detectors in this pipeline
        pipedets = None
        if detectors is None:
            pipedets = focalplane.detectors
        else:
            pipedets = list()
            for det in focalplane.detectors:
                if det in detectors:
                    pipedets.append(det)

        # Group by detector sets and prune to include only the detectors we
        # are using.
        detsets = None
        if self.detset_key is not None:
            detsets = dict()
            dsets = focalplane.detector_groups(self.detset_key)
            for k, v in dsets.items():
                detsets[k] = list()
                for d in v:
                    if d in pipedets:
                        detsets[k].append(d)

        # Data distribution in the detector direction
        det_ranks = comm.group_size
        if self.distribute_time:
            det_ranks = 1

        # Verify that we have enough data for all of our processes.  If we are
        # distributing by time, we have no sample sets, so can accomodate any
        # number of processes.  If we are distributing by detector, we must have
        # at least one detector set for each process.

        if not self.distribute_time:
            # distributing by detector...
            n_detset = None
            if detsets is None:
                # Every detector is independently distributed
                n_detset = len(pipedets)
            else:
                n_detset = len(detsets)
            if det_ranks > n_detset:
                if comm.group_rank == 0:
                    msg = f"Group {comm.group} has {comm.group_size} "
                    msg += f"processes but {n_detset} detector sets."
                    log.error(msg)
                    raise RuntimeError(msg)

        # The global start is the beginning of the first scan

        mission_start = self.schedule.scans[0].start

        # Satellite motion is continuous across multiple observations, so we simulate
        # continuous sampling and find the actual start / stop times for the samples
        # that fall in each scan time range.

        if len(self.schedule.scans) == 0:
            raise RuntimeError("Schedule has no scans!")

        scan_starts = list()
        scan_stops = list()
        scan_offsets = list()
        scan_samples = list()

        incr = 1.0 / rate
        off = 0
        for scan in self.schedule.scans:
            ffirst = rate * (scan.start - mission_start).total_seconds()
            first = int(ffirst)
            if ffirst - first > 1.0e-3 * incr:
                first += 1
            start = first * incr + mission_start.timestamp()
            ns = 1 + int(rate * (scan.stop.timestamp() - start))
            stop = (ns - 1) * incr + mission_start.timestamp()
            scan_starts.append(start)
            scan_stops.append(stop)
            scan_samples.append(ns)
            scan_offsets.append(off)
            off += ns

        # Distribute the observations uniformly among groups.  We take each scan and
        # weight it by the duration.

        groupdist = distribute_discrete(scan_samples, comm.ngroups)

        # Every process group creates its observations

        group_firstobs = groupdist[comm.group][0]
        group_numobs = groupdist[comm.group][1]

        for obindx in range(group_firstobs, group_firstobs + group_numobs):
            scan = self.schedule.scans[obindx]

            ses_start = scan_starts[obindx]
            ses_end = ses_start + float(scan_samples[obindx] - 1) / rate

            session = Session(
                f"{scan.name}_{int(ses_start):10d}",
                start=datetime.fromtimestamp(ses_start).astimezone(timezone.utc),
                end=datetime.fromtimestamp(ses_end).astimezone(timezone.utc),
            )

            ob = Observation(
                comm,
                self.telescope,
                scan_samples[obindx],
                name=f"{scan.name}_{int(scan.start.timestamp())}",
                uid=name_UID(scan.name),
                session=session,
                detector_sets=detsets,
                process_rows=det_ranks,
            )

            # Create shared objects for timestamps, common flags, position,
            # and velocity.
            ob.shared.create_column(
                self.times,
                shape=(ob.n_local_samples,),
                dtype=np.float64,
            )
            ob.shared.create_column(
                self.shared_flags,
                shape=(ob.n_local_samples,),
                dtype=np.uint8,
            )
            ob.shared.create_column(
                self.position,
                shape=(ob.n_local_samples, 3),
                dtype=np.float64,
            )
            ob.shared.create_column(
                self.velocity,
                shape=(ob.n_local_samples, 3),
                dtype=np.float64,
            )

            # Rank zero of each grid column creates the data

            stamps = None
            position = None
            velocity = None
            q_prec = None

            if ob.comm_col_rank == 0:
                start_time = scan_starts[obindx] + float(ob.local_index_offset) / rate
                stop_time = start_time + float(ob.n_local_samples - 1) / rate
                stamps = np.linspace(
                    start_time,
                    stop_time,
                    num=ob.n_local_samples,
                    endpoint=True,
                    dtype=np.float64,
                )

                # Get the motion of the site for these times.
                position, velocity = site.position_velocity(stamps)
                if coord_rot is not None:
                    # `site` always returns ICRS (celestial) position
                    position = qa.rotate(coord_rot, position)
                    velocity = qa.rotate(coord_rot, velocity)

                # Get the quaternions for the precession axis.  For now, assume that
                # it simply points away from the solar system barycenter

                pos_norm = np.sqrt((position * position).sum(axis=1)).reshape(-1, 1)
                pos_norm = 1.0 / pos_norm
                prec_axis = pos_norm * position
                q_prec = qa.from_vectors(
                    np.tile(zaxis, ob.n_local_samples).reshape(-1, 3), prec_axis
                )

            ob.shared[self.times].set(stamps, offset=(0,), fromrank=0)
            ob.shared[self.position].set(position, offset=(0, 0), fromrank=0)
            ob.shared[self.velocity].set(velocity, offset=(0, 0), fromrank=0)

            # Create boresight pointing

            satellite_scanning(
                ob,
                self.boresight,
                sample_offset=scan_offsets[obindx],
                q_prec=q_prec,
                spin_period=scan.spin_period,
                spin_angle=self.spin_angle,
                prec_period=scan.prec_period,
                prec_angle=self.prec_angle,
            )

            # Set HWP angle

            simulate_hwp_response(
                ob,
                ob_time_key=self.times,
                ob_angle_key=self.hwp_angle,
                ob_mueller_key=None,
                hwp_start=scan_starts[obindx] * u.second,
                hwp_rpm=self.hwp_rpm,
                hwp_step=self.hwp_step,
                hwp_step_time=self.hwp_step_time,
            )

            # Optionally initialize detector data

            dets = ob.select_local_detectors(detectors)

            if self.det_data is not None:
                exists_data = ob.detdata.ensure(
                    self.det_data,
                    dtype=np.float64,
                    detectors=dets,
                    create_units=self.det_data_units,
                )

            if self.det_flags is not None:
                exists_flags = ob.detdata.ensure(
                    self.det_flags, dtype=np.uint8, detectors=dets
                )

            data.obs.append(ob)

        return

    def _finalize(self, data, **kwargs):
        return

    def _requires(self):
        return dict()

    def _provides(self):
        prov = {
            "shared": [
                self.times,
                self.shared_flags,
                self.boresight,
                self.hwp_angle,
                self.position,
                self.velocity,
            ]
        }
        if self.det_data is not None:
            prov["detdata"].append(self.det_data)
        if self.det_flags is not None:
            prov["detdata"].append(self.det_flags)
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

boresight = Unicode(defaults.boresight_radec, help='Observation shared key for boresight') class-attribute instance-attribute

coord = Unicode('C', help="Coordinate system to use for pointing. One of ('C', 'E', 'G')") class-attribute instance-attribute

det_data = Unicode(defaults.det_data, allow_none=True, help='Observation detdata key to initialize') class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

det_flags = Unicode(defaults.det_flags, allow_none=True, help='Observation detdata key for flags to initialize') class-attribute instance-attribute

detset_key = Unicode(None, allow_none=True, help='If specified, use this column of the focalplane detector_data to group detectors') class-attribute instance-attribute

distribute_time = Bool(False, help='Distribute observation data along the time axis rather than detector axis') class-attribute instance-attribute

hwp_angle = Unicode(None, allow_none=True, help='Observation shared key for HWP angle') class-attribute instance-attribute

hwp_rpm = Float(None, allow_none=True, help='The rate (in RPM) of the HWP rotation') class-attribute instance-attribute

hwp_step = Quantity(None, allow_none=True, help='For stepped HWP, the angle of each step') class-attribute instance-attribute

hwp_step_time = Quantity(None, allow_none=True, help='For stepped HWP, the time between steps') class-attribute instance-attribute

position = Unicode(defaults.position, help='Observation shared key for position') class-attribute instance-attribute

prec_angle = Quantity(65.0 * u.degree, help='The opening angle of the spin axis from the precession axis') class-attribute instance-attribute

schedule = Instance(klass=SatelliteSchedule, allow_none=True, help='Instance of a SatelliteSchedule') class-attribute instance-attribute

shared_flags = Unicode(defaults.shared_flags, allow_none=True, help='Observation shared key for common flags') class-attribute instance-attribute

spin_angle = Quantity(30.0 * u.degree, help='The opening angle of the boresight from the spin axis') class-attribute instance-attribute

telescope = Instance(klass=Telescope, allow_none=True, help='This must be an instance of a Telescope') class-attribute instance-attribute

times = Unicode(defaults.times, help='Observation shared key for timestamps') class-attribute instance-attribute

velocity = Unicode(defaults.velocity, help='Observation shared key for velocity') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/sim_satellite.py
352
353
def __init__(self, **kwargs):
    # All configuration arrives as traitlets keyword arguments and is
    # handled entirely by the Operator base class.
    super().__init__(**kwargs)

_check_coord(proposal)

Source code in toast/ops/sim_satellite.py
279
280
281
282
283
284
285
@traitlets.validate("coord")
def _check_coord(self, proposal):
    check = proposal["value"]
    if check is not None:
        if check not in ["E", "C", "G"]:
            raise traitlets.TraitError("coordinate system must be 'E', 'C', or 'G'")
    return check

_check_hwp_angle(proposal)

Source code in toast/ops/sim_satellite.py
309
310
311
312
313
314
315
316
317
318
319
320
@traitlets.validate("hwp_angle")
def _check_hwp_angle(self, proposal):
    hwp_angle = proposal["value"]
    if hwp_angle is None:
        if self.hwp_rpm is not None or self.hwp_step is not None:
            raise traitlets.TraitError(
                "Cannot simulate HWP without a shared data key"
            )
    else:
        if self.hwp_rpm is None and self.hwp_step is None:
            raise traitlets.TraitError("Cannot simulate HWP without parameters")
    return hwp_angle

_check_hwp_rpm(proposal)

Source code in toast/ops/sim_satellite.py
322
323
324
325
326
327
328
329
330
331
332
333
334
335
@traitlets.validate("hwp_rpm")
def _check_hwp_rpm(self, proposal):
    hwp_rpm = proposal["value"]
    if hwp_rpm is not None:
        if self.hwp_angle is None:
            raise traitlets.TraitError(
                "Cannot simulate rotating HWP without a shared data key"
            )
        if self.hwp_step is not None:
            raise traitlets.TraitError("HWP cannot rotate *and* step.")
    else:
        if self.hwp_angle is not None and self.hwp_step is None:
            raise traitlets.TraitError("Cannot simulate HWP without parameters")
    return hwp_rpm

_check_hwp_step(proposal)

Source code in toast/ops/sim_satellite.py
337
338
339
340
341
342
343
344
345
346
347
348
349
350
@traitlets.validate("hwp_step")
def _check_hwp_step(self, proposal):
    hwp_step = proposal["value"]
    if hwp_step is not None:
        if self.hwp_angle is None:
            raise traitlets.TraitError(
                "Cannot simulate stepped HWP without a shared data key"
            )
        if self.hwp_rpm is not None:
            raise traitlets.TraitError("HWP cannot rotate *and* step.")
    else:
        if self.hwp_angle is not None and self.hwp_rpm is None:
            raise traitlets.TraitError("Cannot simulate HWP without parameters")
    return hwp_step

_check_schedule(proposal)

Source code in toast/ops/sim_satellite.py
299
300
301
302
303
304
305
306
307
@traitlets.validate("schedule")
def _check_schedule(self, proposal):
    sch = proposal["value"]
    if sch is not None:
        if not isinstance(sch, SatelliteSchedule):
            raise traitlets.TraitError(
                "schedule must be an instance of a SatelliteSchedule"
            )
    return sch

_check_telescope(proposal)

Source code in toast/ops/sim_satellite.py
287
288
289
290
291
292
293
294
295
296
297
@traitlets.validate("telescope")
def _check_telescope(self, proposal):
    tele = proposal["value"]
    if tele is not None:
        try:
            dets = tele.focalplane.detectors
        except Exception:
            raise traitlets.TraitError(
                "telescope must be a Telescope instance with a focalplane"
            )
    return tele

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/sim_satellite.py
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Create simulated satellite observations for this process group.

    One Observation is created per scan in the schedule (scans are
    distributed across process groups, weighted by sample count).  For
    each observation the shared timestamps, telescope position and
    velocity, boresight pointing, and HWP angle are filled in, and the
    detector data / flag objects are optionally allocated.

    Args:
        data (Data):  The toast Data object whose obs list is extended.
        detectors (list):  Optional list of detectors to use (default is
            all detectors in the focalplane).

    Raises:
        RuntimeError:  If the telescope or schedule traits are unset,
            the schedule is empty, or there are fewer detector sets than
            processes in the detector direction.
    """
    zaxis = np.array([0, 0, 1], dtype=np.float64)
    coord_rot = self._get_coord_rot()
    log = Logger.get()
    if self.telescope is None:
        raise RuntimeError(
            "The telescope attribute must be set before calling exec()"
        )
    if self.schedule is None:
        raise RuntimeError(
            "The schedule attribute must be set before calling exec()"
        )
    if len(self.schedule.scans) == 0:
        # Check this before any use of scans[0] below.  Previously the
        # first scan was indexed before this check, so an empty schedule
        # raised an unhelpful IndexError instead of this message.
        raise RuntimeError("Schedule has no scans!")
    focalplane = self.telescope.focalplane
    rate = focalplane.sample_rate.to_value(u.Hz)
    site = self.telescope.site
    comm = data.comm

    # List of detectors in this pipeline
    if detectors is None:
        pipedets = focalplane.detectors
    else:
        pipedets = [det for det in focalplane.detectors if det in detectors]

    # Group by detector sets and prune to include only the detectors we
    # are using.
    detsets = None
    if self.detset_key is not None:
        detsets = dict()
        dsets = focalplane.detector_groups(self.detset_key)
        for k, v in dsets.items():
            detsets[k] = [d for d in v if d in pipedets]

    # Data distribution in the detector direction
    det_ranks = comm.group_size
    if self.distribute_time:
        det_ranks = 1

    # Verify that we have enough data for all of our processes.  If we are
    # distributing by time, we have no sample sets, so can accommodate any
    # number of processes.  If we are distributing by detector, we must have
    # at least one detector set for each process.

    if not self.distribute_time:
        # distributing by detector...
        if detsets is None:
            # Every detector is independently distributed
            n_detset = len(pipedets)
        else:
            n_detset = len(detsets)
        if det_ranks > n_detset:
            msg = f"Group {comm.group} has {comm.group_size} "
            msg += f"processes but {n_detset} detector sets."
            if comm.group_rank == 0:
                log.error(msg)
            # Raise on every process, not only group rank zero, so that
            # the remaining ranks do not continue past this point and
            # hang in later collective operations.
            raise RuntimeError(msg)

    # The global start is the beginning of the first scan

    mission_start = self.schedule.scans[0].start

    # Satellite motion is continuous across multiple observations, so we
    # simulate continuous sampling and find the actual start / stop times
    # for the samples that fall in each scan time range.

    scan_starts = list()
    scan_stops = list()
    scan_offsets = list()
    scan_samples = list()

    incr = 1.0 / rate
    off = 0
    for scan in self.schedule.scans:
        # Index of the first sample at or after the scan start, on the
        # continuous sample grid anchored at the mission start.  The
        # small tolerance absorbs floating point round-off.
        ffirst = rate * (scan.start - mission_start).total_seconds()
        first = int(ffirst)
        if ffirst - first > 1.0e-3 * incr:
            first += 1
        start = first * incr + mission_start.timestamp()
        ns = 1 + int(rate * (scan.stop.timestamp() - start))
        stop = (ns - 1) * incr + mission_start.timestamp()
        scan_starts.append(start)
        scan_stops.append(stop)
        scan_samples.append(ns)
        scan_offsets.append(off)
        off += ns

    # Distribute the observations uniformly among groups.  We take each
    # scan and weight it by the duration.

    groupdist = distribute_discrete(scan_samples, comm.ngroups)

    # Every process group creates its observations

    group_firstobs = groupdist[comm.group][0]
    group_numobs = groupdist[comm.group][1]

    for obindx in range(group_firstobs, group_firstobs + group_numobs):
        scan = self.schedule.scans[obindx]

        ses_start = scan_starts[obindx]
        ses_end = ses_start + float(scan_samples[obindx] - 1) / rate

        # Build timezone-aware datetimes directly in UTC, rather than
        # converting a naive local time, to avoid DST-fold ambiguity.
        session = Session(
            f"{scan.name}_{int(ses_start):10d}",
            start=datetime.fromtimestamp(ses_start, tz=timezone.utc),
            end=datetime.fromtimestamp(ses_end, tz=timezone.utc),
        )

        ob = Observation(
            comm,
            self.telescope,
            scan_samples[obindx],
            name=f"{scan.name}_{int(scan.start.timestamp())}",
            uid=name_UID(scan.name),
            session=session,
            detector_sets=detsets,
            process_rows=det_ranks,
        )

        # Create shared objects for timestamps, common flags, position,
        # and velocity.
        ob.shared.create_column(
            self.times,
            shape=(ob.n_local_samples,),
            dtype=np.float64,
        )
        ob.shared.create_column(
            self.shared_flags,
            shape=(ob.n_local_samples,),
            dtype=np.uint8,
        )
        ob.shared.create_column(
            self.position,
            shape=(ob.n_local_samples, 3),
            dtype=np.float64,
        )
        ob.shared.create_column(
            self.velocity,
            shape=(ob.n_local_samples, 3),
            dtype=np.float64,
        )

        # Rank zero of each grid column creates the data

        stamps = None
        position = None
        velocity = None
        q_prec = None

        if ob.comm_col_rank == 0:
            start_time = scan_starts[obindx] + float(ob.local_index_offset) / rate
            stop_time = start_time + float(ob.n_local_samples - 1) / rate
            stamps = np.linspace(
                start_time,
                stop_time,
                num=ob.n_local_samples,
                endpoint=True,
                dtype=np.float64,
            )

            # Get the motion of the site for these times.
            position, velocity = site.position_velocity(stamps)
            if coord_rot is not None:
                # `site` always returns ICRS (celestial) position
                position = qa.rotate(coord_rot, position)
                velocity = qa.rotate(coord_rot, velocity)

            # Get the quaternions for the precession axis.  For now,
            # assume that it simply points away from the solar system
            # barycenter.

            pos_norm = np.sqrt((position * position).sum(axis=1)).reshape(-1, 1)
            pos_norm = 1.0 / pos_norm
            prec_axis = pos_norm * position
            q_prec = qa.from_vectors(
                np.tile(zaxis, ob.n_local_samples).reshape(-1, 3), prec_axis
            )

        # Non-zero column ranks pass None; the data is broadcast from
        # rank zero of each column by the shared-object set() call.
        ob.shared[self.times].set(stamps, offset=(0,), fromrank=0)
        ob.shared[self.position].set(position, offset=(0, 0), fromrank=0)
        ob.shared[self.velocity].set(velocity, offset=(0, 0), fromrank=0)

        # Create boresight pointing

        satellite_scanning(
            ob,
            self.boresight,
            sample_offset=scan_offsets[obindx],
            q_prec=q_prec,
            spin_period=scan.spin_period,
            spin_angle=self.spin_angle,
            prec_period=scan.prec_period,
            prec_angle=self.prec_angle,
        )

        # Set HWP angle

        simulate_hwp_response(
            ob,
            ob_time_key=self.times,
            ob_angle_key=self.hwp_angle,
            ob_mueller_key=None,
            hwp_start=scan_starts[obindx] * u.second,
            hwp_rpm=self.hwp_rpm,
            hwp_step=self.hwp_step,
            hwp_step_time=self.hwp_step_time,
        )

        # Optionally initialize detector data

        dets = ob.select_local_detectors(detectors)

        if self.det_data is not None:
            ob.detdata.ensure(
                self.det_data,
                dtype=np.float64,
                detectors=dets,
                create_units=self.det_data_units,
            )

        if self.det_flags is not None:
            ob.detdata.ensure(self.det_flags, dtype=np.uint8, detectors=dets)

        data.obs.append(ob)

    return

_finalize(data, **kwargs)

Source code in toast/ops/sim_satellite.py
606
607
def _finalize(self, data, **kwargs):
    return

_get_coord_rot()

Get an optional coordinate rotation quaternion to return satellite pointing and velocity in the user-specified frame

Source code in toast/ops/sim_satellite.py
355
356
357
358
359
360
361
362
363
364
365
def _get_coord_rot(self):
    """Get an optional coordinate rotation quaternion to return satellite
    pointing and velocity in the user-specified frame
    """
    # "C" (Equatorial) is the native frame returned by the site model, so
    # no rotation is applied.
    if self.coord == "C":
        coord_rot = None
    elif self.coord == "E":
        # Rotation from equatorial to ecliptic coordinates.
        coord_rot = qa.equ2ecl()
    elif self.coord == "G":
        # Rotation from equatorial to galactic coordinates.
        coord_rot = qa.equ2gal()
    # NOTE(review): any other value would leave coord_rot unbound here;
    # the "coord" trait validator restricts values to C / E / G.
    return coord_rot

_provides()

Source code in toast/ops/sim_satellite.py
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
def _provides(self):
    prov = {
        "shared": [
            self.times,
            self.shared_flags,
            self.boresight,
            self.hwp_angle,
            self.position,
            self.velocity,
        ]
    }
    if self.det_data is not None:
        prov["detdata"].append(self.det_data)
    if self.det_flags is not None:
        prov["detdata"].append(self.det_flags)
    return prov

_requires()

Source code in toast/ops/sim_satellite.py
609
610
def _requires(self):
    return dict()

Sky Signals

These operators generate detector data containing sources of power from outside the Earth's atmosphere.

toast.ops.SimDipole

Bases: Operator

Operator which generates dipole signal for detectors.

This uses the detector pointing, the telescope velocity vectors, and the solar system motion with respect to the CMB rest frame to compute the observed CMB dipole signal. The dipole timestream is either added (default) or subtracted from the specified detector data.

The telescope velocity and detector quaternions are assumed to be in the same coordinate system.

The "mode" trait determines what components of the telescope motion are included in the observed dipole. Valid options are 'solar' for just the solar system motion, 'orbital' for just the motion of the telescope with respect to the solarsystem barycenter, and 'total' which is the sum of both (and the default).

Source code in toast/ops/sim_tod_dipole.py
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
@trait_docs
class SimDipole(Operator):
    """Operator which generates dipole signal for detectors.

    This uses the detector pointing, the telescope velocity vectors, and the solar
    system motion with respect to the CMB rest frame to compute the observed CMB dipole
    signal.  The dipole timestream is either added (default) or subtracted from the
    specified detector data.

    The telescope velocity and detector quaternions are assumed to be in the same
    coordinate system.

    The "mode" trait determines what components of the telescope motion are included in
    the observed dipole.  Valid options are 'solar' for just the solar system motion,
    'orbital' for just the motion of the telescope with respect to the solarsystem
    barycenter, and 'total' which is the sum of both (and the default).

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    det_data = Unicode(
        defaults.det_data,
        help="Observation detdata key for accumulating dipole timestreams",
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    view = Unicode(
        None, allow_none=True, help="Use this view of the data in all observations"
    )

    boresight = Unicode(
        defaults.boresight_radec, help="Observation shared key for boresight"
    )

    shared_flags = Unicode(
        defaults.shared_flags,
        allow_none=True,
        help="Observation shared key for telescope flags to use",
    )

    shared_flag_mask = Int(
        defaults.shared_mask_invalid, help="Bit mask value for optional flagging"
    )

    velocity = Unicode(defaults.velocity, help="Observation shared key for velocity")

    subtract = Bool(
        False, help="If True, subtract the dipole timestream instead of accumulating"
    )

    mode = Unicode("total", help="Valid options are solar, orbital, and total")

    coord = Unicode(
        "E",
        help="Valid options are 'C' (Equatorial), 'E' (Ecliptic), and 'G' (Galactic)",
    )

    # NOTE(review): the defaults below look like the measured CMB solar
    # dipole amplitude and direction -- confirm against the intended
    # published reference before relying on them.
    solar_speed = Quantity(
        369.0 * u.kilometer / u.second,
        help="Amplitude of the solarsystem barycenter velocity with respect to the CMB",
    )

    solar_gal_lat = Quantity(
        48.26 * u.degree, help="Galactic latitude of direction of solarsystem motion"
    )

    solar_gal_lon = Quantity(
        263.99 * u.degree, help="Galactic longitude of direction of solarsystem motion"
    )

    cmb = Quantity(2.72548 * u.Kelvin, help="CMB monopole value")

    freq = Quantity(0 * u.Hz, help="Optional observing frequency")

    @traitlets.validate("mode")
    def _check_mode(self, proposal):
        # Restrict mode to the supported dipole components.
        check = proposal["value"]
        if check not in ["solar", "orbital", "total"]:
            raise traitlets.TraitError(
                "Invalid mode (must be 'solar', 'orbital' or 'total')"
            )
        return check

    @traitlets.validate("coord")
    def _check_coord(self, proposal):
        # Restrict the coordinate frame to one of the supported values.
        check = proposal["value"]
        if check is not None:
            if check not in ["E", "C", "G"]:
                raise traitlets.TraitError("coordinate system must be 'E', 'C', or 'G'")
        return check

    @traitlets.validate("shared_flag_mask")
    def _check_shared_flag_mask(self, proposal):
        # Flag masks are bit patterns and must be non-negative.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Flag mask should be a positive integer")
        return check

    def __init__(self, **kwargs):
        # All configuration is handled as traitlets keyword arguments by
        # the Operator base class.
        super().__init__(**kwargs)

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        # Compute the dipole timestream for every selected detector in
        # every observation and accumulate (or subtract) it into det_data.
        env = Environment.get()
        log = Logger.get()

        # Quaternion used to overwrite flagged boresight samples.
        nullquat = np.array([0, 0, 0, 1], dtype=np.float64)

        # Compute the solar system velocity in galactic coordinates
        solar_gal_theta = np.deg2rad(90.0 - self.solar_gal_lat.to_value(u.degree))
        solar_gal_phi = np.deg2rad(self.solar_gal_lon.to_value(u.degree))

        solar_speed_kms = self.solar_speed.to_value(u.kilometer / u.second)
        # Component of the speed projected into the galactic X/Y plane.
        solar_projected = solar_speed_kms * np.sin(solar_gal_theta)

        sol_z = solar_speed_kms * np.cos(solar_gal_theta)
        sol_x = solar_projected * np.cos(solar_gal_phi)
        sol_y = solar_projected * np.sin(solar_gal_phi)
        solar_gal_vel = np.array([sol_x, sol_y, sol_z])

        # Rotate solar system velocity to desired coordinate frame
        solar_vel = None
        if self.coord == "G":
            solar_vel = solar_gal_vel
        else:
            rotmat = hp.rotator.Rotator(coord=["G", self.coord]).mat
            solar_vel = np.ravel(np.dot(rotmat, solar_gal_vel))

        for ob in data.obs:
            # Get the detectors we are using for this observation
            dets = ob.select_local_detectors(detectors)
            if len(dets) == 0:
                # Nothing to do for this observation
                continue

            # Make sure detector data output exists
            exists = ob.detdata.ensure(
                self.det_data, detectors=dets, create_units=self.det_data_units
            )

            # Unit conversion from dipole timestream (K) to det data units
            scale = unit_conversion(u.K, ob.detdata[self.det_data].units)

            # Loop over views
            # NOTE(review): presumably ob.view[None] yields a single view
            # spanning the whole observation -- verify in Observation docs.
            views = ob.view[self.view]

            for vw in range(len(views)):
                # Boresight pointing quaternions
                boresight = np.array(views.shared[self.boresight][vw])
                # Samples with any flag bit in the mask get the null
                # quaternion substituted for their pointing.
                flags = views.shared[self.shared_flags][vw] & self.shared_flag_mask
                bad = flags != 0
                boresight[bad, :] = nullquat

                # Set the solar and orbital velocity inputs based on the
                # requested mode.

                sol = None
                vel = None
                if (self.mode == "solar") or (self.mode == "total"):
                    sol = solar_vel
                if (self.mode == "orbital") or (self.mode == "total"):
                    vel = views.shared[self.velocity][vw]

                # Focalplane for this observation
                focalplane = ob.telescope.focalplane

                for det in dets:
                    props = focalplane[det]

                    # Detector quaternion offset from the boresight
                    detquat = props["quat"]

                    # Timestream of detector quaternions
                    quats = qa.mult(boresight, detquat)

                    # Compute the dipole timestream for this view and detector
                    dipole_tod = dipole(
                        quats,
                        vel=vel,
                        solar=sol,
                        cmb=self.cmb,
                        freq=self.freq,
                    )

                    # Add contribution to output
                    if self.subtract:
                        views.detdata[self.det_data][vw][det] -= scale * dipole_tod
                    else:
                        views.detdata[self.det_data][vw][det] += scale * dipole_tod
        return

    def _finalize(self, data, **kwargs):
        # No finalization work is needed.
        return

    def _requires(self):
        # Shared boresight / flags and the output detdata key must exist
        # (detdata is listed because the operator accumulates into it).
        req = {
            "meta": list(),
            "shared": [
                self.boresight,
                self.shared_flags,
            ],
            "detdata": [self.det_data],
            "intervals": list(),
        }
        if self.view is not None:
            req["intervals"].append(self.view)
        return req

    def _provides(self):
        # The only product is the (possibly newly created) detector data.
        prov = {
            "meta": list(),
            "shared": list(),
            "detdata": [
                self.det_data,
            ],
        }
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

boresight = Unicode(defaults.boresight_radec, help='Observation shared key for boresight') class-attribute instance-attribute

cmb = Quantity(2.72548 * u.Kelvin, help='CMB monopole value') class-attribute instance-attribute

coord = Unicode('E', help="Valid options are 'C' (Equatorial), 'E' (Ecliptic), and 'G' (Galactic)") class-attribute instance-attribute

det_data = Unicode(defaults.det_data, help='Observation detdata key for accumulating dipole timestreams') class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

freq = Quantity(0 * u.Hz, help='Optional observing frequency') class-attribute instance-attribute

mode = Unicode('total', help='Valid options are solar, orbital, and total') class-attribute instance-attribute

shared_flag_mask = Int(defaults.shared_mask_invalid, help='Bit mask value for optional flagging') class-attribute instance-attribute

shared_flags = Unicode(defaults.shared_flags, allow_none=True, help='Observation shared key for telescope flags to use') class-attribute instance-attribute

solar_gal_lat = Quantity(48.26 * u.degree, help='Galactic latitude of direction of solarsystem motion') class-attribute instance-attribute

solar_gal_lon = Quantity(263.99 * u.degree, help='Galactic longitude of direction of solarsystem motion') class-attribute instance-attribute

solar_speed = Quantity(369.0 * u.kilometer / u.second, help='Amplitude of the solarsystem barycenter velocity with respect to the CMB') class-attribute instance-attribute

subtract = Bool(False, help='If True, subtract the dipole timestream instead of accumulating') class-attribute instance-attribute

velocity = Unicode(defaults.velocity, help='Observation shared key for velocity') class-attribute instance-attribute

view = Unicode(None, allow_none=True, help='Use this view of the data in all observations') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/sim_tod_dipole.py
123
124
def __init__(self, **kwargs):
    # All configuration arrives as traitlets keyword arguments; forward
    # them unchanged to the Operator base class constructor.
    super().__init__(**kwargs)

_check_coord(proposal)

Source code in toast/ops/sim_tod_dipole.py
108
109
110
111
112
113
114
@traitlets.validate("coord")
def _check_coord(self, proposal):
    """Validate the 'coord' trait: only 'E', 'C' or 'G' are accepted."""
    value = proposal["value"]
    if value is not None and value not in ("E", "C", "G"):
        raise traitlets.TraitError("coordinate system must be 'E', 'C', or 'G'")
    return value

_check_mode(proposal)

Source code in toast/ops/sim_tod_dipole.py
 99
100
101
102
103
104
105
106
@traitlets.validate("mode")
def _check_mode(self, proposal):
    """Validate the 'mode' trait against the supported dipole modes."""
    value = proposal["value"]
    if value in ("solar", "orbital", "total"):
        return value
    raise traitlets.TraitError(
        "Invalid mode (must be 'solar', 'orbital' or 'total')"
    )

_check_shared_flag_mask(proposal)

Source code in toast/ops/sim_tod_dipole.py
116
117
118
119
120
121
@traitlets.validate("shared_flag_mask")
def _check_shared_flag_mask(self, proposal):
    """Reject negative values for the shared flag bit mask."""
    value = proposal["value"]
    if value >= 0:
        return value
    raise traitlets.TraitError("Flag mask should be a positive integer")

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/sim_tod_dipole.py
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Accumulate (or subtract) the simulated dipole timestream.

    The solar system velocity is built from the solar_speed /
    solar_gal_lat / solar_gal_lon traits, rotated into the frame given by
    the 'coord' trait, and passed to the dipole() helper together with
    the per-detector pointing.  The dipole is computed in Kelvin and
    scaled to the units of the output detdata field.
    """
    env = Environment.get()  # NOTE(review): unused in this body — possibly vestigial
    log = Logger.get()  # NOTE(review): unused in this body — possibly vestigial

    # Quaternion used to blank out flagged samples (identity rotation).
    nullquat = np.array([0, 0, 0, 1], dtype=np.float64)

    # Compute the solar system velocity in galactic coordinates
    # (spherical -> cartesian; theta is the galactic colatitude).
    solar_gal_theta = np.deg2rad(90.0 - self.solar_gal_lat.to_value(u.degree))
    solar_gal_phi = np.deg2rad(self.solar_gal_lon.to_value(u.degree))

    solar_speed_kms = self.solar_speed.to_value(u.kilometer / u.second)
    solar_projected = solar_speed_kms * np.sin(solar_gal_theta)

    sol_z = solar_speed_kms * np.cos(solar_gal_theta)
    sol_x = solar_projected * np.cos(solar_gal_phi)
    sol_y = solar_projected * np.sin(solar_gal_phi)
    solar_gal_vel = np.array([sol_x, sol_y, sol_z])

    # Rotate solar system velocity to desired coordinate frame
    solar_vel = None
    if self.coord == "G":
        # Already galactic; no rotation needed.
        solar_vel = solar_gal_vel
    else:
        rotmat = hp.rotator.Rotator(coord=["G", self.coord]).mat
        solar_vel = np.ravel(np.dot(rotmat, solar_gal_vel))

    for ob in data.obs:
        # Get the detectors we are using for this observation
        dets = ob.select_local_detectors(detectors)
        if len(dets) == 0:
            # Nothing to do for this observation
            continue

        # Make sure detector data output exists
        exists = ob.detdata.ensure(
            self.det_data, detectors=dets, create_units=self.det_data_units
        )

        # Unit conversion from dipole timestream (K) to det data units
        scale = unit_conversion(u.K, ob.detdata[self.det_data].units)

        # Loop over views
        views = ob.view[self.view]

        for vw in range(len(views)):
            # Boresight pointing quaternions (copied so flagged samples
            # can be overwritten without touching the shared data).
            boresight = np.array(views.shared[self.boresight][vw])
            flags = views.shared[self.shared_flags][vw] & self.shared_flag_mask
            bad = flags != 0
            boresight[bad, :] = nullquat

            # Set the solar and orbital velocity inputs based on the
            # requested mode.

            sol = None
            vel = None
            if (self.mode == "solar") or (self.mode == "total"):
                sol = solar_vel
            if (self.mode == "orbital") or (self.mode == "total"):
                vel = views.shared[self.velocity][vw]

            # Focalplane for this observation
            focalplane = ob.telescope.focalplane

            for det in dets:
                props = focalplane[det]

                # Detector quaternion offset from the boresight
                detquat = props["quat"]

                # Timestream of detector quaternions
                quats = qa.mult(boresight, detquat)

                # Compute the dipole timestream for this view and detector
                dipole_tod = dipole(
                    quats,
                    vel=vel,
                    solar=sol,
                    cmb=self.cmb,
                    freq=self.freq,
                )

                # Add contribution to output.  'subtract' flips the sign so
                # the same operator can remove an existing dipole signal.
                if self.subtract:
                    views.detdata[self.det_data][vw][det] -= scale * dipole_tod
                else:
                    views.detdata[self.det_data][vw][det] += scale * dipole_tod
    return

_finalize(data, **kwargs)

Source code in toast/ops/sim_tod_dipole.py
216
217
def _finalize(self, data, **kwargs):
    # No reduction or cleanup is needed after execution.
    return

_provides()

Source code in toast/ops/sim_tod_dipole.py
233
234
235
236
237
238
239
240
241
def _provides(self):
    prov = {
        "meta": list(),
        "shared": list(),
        "detdata": [
            self.det_data,
        ],
    }
    return prov

_requires()

Source code in toast/ops/sim_tod_dipole.py
219
220
221
222
223
224
225
226
227
228
229
230
231
def _requires(self):
    req = {
        "meta": list(),
        "shared": [
            self.boresight,
            self.shared_flags,
        ],
        "detdata": [self.det_data],
        "intervals": list(),
    }
    if self.view is not None:
        req["intervals"].append(self.view)
    return req

Beam-Convolved Sky

toast.ops.SimConviqt

Bases: Operator

Operator which uses libconviqt to generate beam-convolved timestreams.

Source code in toast/ops/conviqt.py
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
@trait_docs
class SimConviqt(Operator):
    """Operator which uses libconviqt to generate beam-convolved timestreams.

    For each detector, the operator expands the boresight pointing into
    detector ZYZ Euler angles, loads sky and beam a_lm expansions, runs the
    conviqt convolution over MPI, optionally rescales the result for
    polarization leakage, and accumulates the convolved signal into the
    configured detdata field.
    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    comm = Instance(
        klass=MPI_Comm,
        allow_none=True,
        help="MPI communicator to use for the convolution. libConviqt does "
        "not work without MPI.",
    )

    detector_pointing = Instance(
        klass=Operator,
        allow_none=True,
        help="Operator that translates boresight pointing into detector frame",
    )

    det_mask = Int(
        defaults.det_mask_invalid,
        help="Bit mask value for per-detector flagging",
    )

    det_flags = Unicode(
        defaults.det_flags,
        allow_none=True,
        help="Observation detdata key for flags to use",
    )

    det_flag_mask = Int(
        defaults.det_mask_invalid, help="Bit mask value for detector sample flagging"
    )

    shared_flags = Unicode(
        defaults.shared_flags,
        allow_none=True,
        help="Observation shared key for telescope flags to use",
    )

    shared_flag_mask = Int(
        defaults.shared_mask_invalid, help="Bit mask value for optional flagging"
    )

    view = Unicode(
        None, allow_none=True, help="Use this view of the data in all observations"
    )

    det_data = Unicode(
        defaults.det_data,
        allow_none=False,
        help="Observation detdata key for accumulating convolved timestreams",
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    calibrate = Bool(
        True,
        allow_none=False,
        help="Calibrate intensity to 1.0, rather than (1 + epsilon) / 2. "
        "Calibrate has no effect if the beam is found to be normalized rather than "
        "scaled with the leakage factor.",
    )

    dxx = Bool(
        True,
        allow_none=False,
        help="The beam frame is either Dxx or Pxx. Pxx includes the rotation to "
        "polarization sensitive basis, Dxx does not. When Dxx=True, detector "
        "orientation from attitude quaternions is corrected for the polarization "
        "angle.",
    )

    pol = Bool(
        True,
        allow_none=False,
        help="Toggle simulated signal polarization",
    )

    mc = Int(
        None,
        allow_none=True,
        help="Monte Carlo index used in synthesizing the input file names.",
    )

    @traitlets.validate("mc")
    def _check_mc(self, proposal):
        check = proposal["value"]
        if check is not None and check < 0:
            raise traitlets.TraitError("MC index cannot be negative")
        return check

    beammmax = Int(
        -1,
        allow_none=False,
        help="Beam maximum m.  Actual resolution in the Healpix FITS file may differ. "
        "If not set, will use the maximum expansion order from file.",
    )

    lmax = Int(
        -1,
        allow_none=False,
        help="Maximum ell (and m).  Actual resolution in the Healpix FITS file may "
        "differ.  If not set, will use the maximum expansion order from file.",
    )

    order = Int(
        13,
        allow_none=False,
        help="Conviqt order parameter (expert mode)",
    )

    verbosity = Int(
        0,
        allow_none=False,
        help="",
    )

    normalize_beam = Bool(
        False,
        allow_none=False,
        help="Normalize beam to have unit response to temperature monopole.",
    )

    remove_dipole = Bool(
        False,
        allow_none=False,
        help="Suppress the temperature dipole in sky_file.",
    )

    remove_monopole = Bool(
        False,
        allow_none=False,
        help="Suppress the temperature monopole in sky_file.",
    )

    apply_flags = Bool(
        False,
        allow_none=False,
        help="Only synthesize signal for unflagged samples.",
    )

    fwhm = Quantity(
        4.0 * u.arcmin,
        allow_none=False,
        help="Width of a symmetric gaussian beam already present in the skyfile "
        "(will be deconvolved away).",
    )

    sky_file_dict = Dict(
        {},
        help="Dictionary of files containing the sky a_lm expansions. An entry for "
        "each detector name must be present. If provided, supersedes `sky_file`.",
    )

    sky_file = Unicode(
        None,
        allow_none=True,
        help="File containing the sky a_lm expansion.  Tag {detector} will be "
        "replaced with the detector name",
    )

    beam_file_dict = Dict(
        {},
        help="Dictionary of files containing the beam a_lm expansions. An entry for "
        "each detector name must be present. If provided, supersedes `beam_file`.",
    )

    beam_file = Unicode(
        None,
        allow_none=True,
        help="File containing the beam a_lm expansion.  Tag {detector} will be "
        "replaced with the detector name.",
    )

    @traitlets.validate("det_mask")
    def _check_det_mask(self, proposal):
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det mask should be a positive integer")
        return check

    @traitlets.validate("shared_flag_mask")
    def _check_shared_flag_mask(self, proposal):
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Shared flag mask should be a positive integer")
        return check

    @traitlets.validate("det_flag_mask")
    def _check_det_flag_mask(self, proposal):
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det flag mask should be a positive integer")
        return check

    def __init__(self, **kwargs):
        # All configuration arrives via traitlets keyword arguments.
        super().__init__(**kwargs)
        return

    @property
    def available(self):
        """Return True if libconviqt is found in the library search path."""
        return conviqt is not None and conviqt.available

    hwp_angle = Unicode(
        None, allow_none=True, help="Observation shared key for HWP angle"
    )

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Run the beam convolution for every detector in `data`.

        Raises:
            RuntimeError: if libconviqt or MPI are unavailable, if no
                detector pointing operator was configured, or if an HWP
                angle was configured (unsupported by this operator).
        """
        if not self.available:
            raise RuntimeError("libconviqt is not available")

        if self.comm is None:
            raise RuntimeError("libconviqt requires MPI")

        if self.detector_pointing is None:
            raise RuntimeError("detector_pointing cannot be None.")

        if self.hwp_angle is not None:
            raise RuntimeError("Standard conviqt operator cannot handle HWP angle")

        log = Logger.get()

        timer = Timer()
        timer.start()

        # Determine the units of existing detector data, falling back to
        # the configured default when the data will be created here.
        self.units = data.detector_units(self.det_data)
        if self.units is None:
            # This means that the data does not yet exist
            self.units = self.det_data_units

        all_detectors = self._get_all_detectors(data, detectors)

        for det in all_detectors:
            verbose = self.comm.rank == 0 and self.verbosity > 0

            # Expand detector pointing
            self.detector_pointing.apply(data, detectors=[det])

            # Per-detector file entries supersede the template file names.
            if det in self.sky_file_dict:
                sky_file = self.sky_file_dict[det]
            else:
                sky_file = self.sky_file.format(detector=det, mc=self.mc)
            sky = self.get_sky(sky_file, det, verbose)

            if det in self.beam_file_dict:
                beam_file = self.beam_file_dict[det]
            else:
                beam_file = self.beam_file.format(detector=det, mc=self.mc)

            beam = self.get_beam(beam_file, det, verbose)

            detector = self.get_detector(det)

            theta, phi, psi_det, psi_pol, psi_beam, hwp_angle = self.get_pointing(
                data, det, verbose
            )

            # Only the beam orientation angle enters the convolution buffer.
            pnt = self.get_buffer(theta, phi, psi_beam, det, verbose)
            del theta, phi, psi_det, psi_pol, psi_beam, hwp_angle

            convolved_data = self.convolve(sky, beam, detector, pnt, det, verbose)

            self.calibrate_signal(data, det, beam, convolved_data, verbose)
            self.save(data, det, convolved_data, verbose)

            # Free the conviqt objects before the next detector.
            del pnt, detector, beam, sky

            if verbose:
                timer.report_clear(f"conviqt process detector {det}")

        return

    def _get_all_detectors(self, data, detectors):
        """Assemble a list of detectors across all processes and
        observations in `self._comm`.
        """
        my_dets = set()
        for obs in data.obs:
            # Get the detectors we are using for this observation
            obs_dets = obs.select_local_detectors(detectors, flagmask=self.det_mask)
            for det in obs_dets:
                my_dets.add(det)
            # Make sure detector data output exists
            exists = obs.detdata.ensure(
                self.det_data, detectors=detectors, create_units=self.units
            )
        # Gather the per-process detector sets and broadcast a sorted union
        # so every process iterates detectors in the same order.
        all_dets = self.comm.gather(my_dets, root=0)
        if self.comm.rank == 0:
            for some_dets in all_dets:
                my_dets.update(some_dets)
            my_dets = sorted(my_dets)
        all_dets = self.comm.bcast(my_dets, root=0)
        return all_dets

    def _get_psi_pol(self, focalplane, det):
        """Parse polarization angle in radians from the focalplane
        dictionary.  The angle is relative to the Pxx basis.
        """
        if det not in focalplane:
            raise RuntimeError(f"focalplane does not include {det}")
        props = focalplane[det]
        if "pol" in props.colnames:
            pol = props["pol"]
        else:
            pol = None
        # Detector naming conventions encode the polarization type in the
        # trailing character when no explicit "pol" column exists.
        if pol == "A" or det.endswith(("a", "A", "t", "T")):
            return 0.0
        elif pol == "B" or det.endswith(("b", "B")):
            return np.pi / 2
        # Only if the polarization type is not recorded, will we look for
        # polarization angle in the focalplane
        if "psi_pol" in props.colnames:
            psi_pol = props["psi_pol"].to_value(u.radian)
        elif "pol_angle" in props.colnames:
            warnings.warn(
                "Use psi_pol and psi_uv rather than pol_angle", DeprecationWarning
            )
            psi_pol = props["pol_angle"].to_value(u.radian)
        else:
            raise RuntimeError(f"focalplane[{det}] does not include psi_pol")
        return psi_pol

    def _get_psi_uv(self, focalplane, det):
        """Parse Pxx basis angle in radians from the focalplane
        dictionary.  The angle is measured from Dxx to Pxx basis.
        """
        if det not in focalplane:
            raise RuntimeError(f"focalplane does not include {det}")
        props = focalplane[det]
        # FIX: the original tested for a "psi_uv_deg" column but then read
        # "psi_uv", so a focalplane that actually provides "psi_uv" fell
        # through to the warning.  Test the column that is read.
        if "psi_uv" in props.colnames:
            psi_uv = props["psi_uv"].to_value(u.radian)
        else:
            msg = f"focalplane[{det}] does not include 'psi_uv'. "
            msg += f"Valid column names are {props.colnames}"
            warnings.warn(msg)
            psi_uv = 0
        return psi_uv

    def _get_epsilon(self, focalplane, det):
        """Parse polarization leakage (epsilon) from the focalplane
        object or dictionary.
        """
        if det not in focalplane:
            raise RuntimeError(f"focalplane does not include {det}")
        props = focalplane[det]
        if "pol_leakage" in props.colnames:
            epsilon = focalplane[det]["pol_leakage"]
        else:
            # Assume zero polarization leakage
            epsilon = 0
        return epsilon

    def get_sky(self, skyfile, det, verbose, pol=None):
        """Load the sky a_lm expansion for one detector.

        When `pol` is None the operator-level `pol` trait is used.
        """
        timer = Timer()
        timer.start()
        if pol is None:
            pol = self.pol
        sky = conviqt.Sky(
            self.lmax,
            pol,
            skyfile,
            self.fwhm.to_value(u.arcmin),
            self.comm,
        )
        if self.remove_monopole:
            sky.remove_monopole()
        if self.remove_dipole:
            sky.remove_dipole()
        if verbose:
            timer.report_clear(f"initialize sky for detector {det}")
        return sky

    def get_beam(self, beamfile, det, verbose, pol=None):
        """Load the beam a_lm expansion for one detector."""
        timer = Timer()
        timer.start()
        if pol is None:
            pol = self.pol
        beam = conviqt.Beam(self.lmax, self.beammmax, pol, beamfile, self.comm)
        if self.normalize_beam:
            beam.normalize()
        if verbose:
            timer.report_clear(f"initialize beam for detector {det}")
        return beam

    def get_detector(self, det):
        """We always create the detector with zero leakage and scale
        the returned TOD ourselves
        """
        detector = conviqt.Detector(name=det, epsilon=0)
        return detector

    def get_pointing(self, data, det, verbose):
        """Return the detector pointing as ZYZ Euler angles without the
        polarization sensitive angle.  These angles are to be compatible
        with Pxx or Dxx frame beam products
        """
        # We need the three pointing angles to describe the
        # pointing.  local_pointing() returns the attitude quaternions.
        nullquat = np.array([0, 0, 0, 1], dtype=np.float64)
        timer = Timer()
        timer.start()
        all_theta = []
        all_phi = []
        all_psi_det = []
        all_psi_pol = []
        all_psi_beam = []
        all_hwp_angle = []
        for obs in data.obs:
            if det not in obs.local_detectors:
                continue
            focalplane = obs.telescope.focalplane
            # Loop over views
            views = obs.view[self.view]
            for view in range(len(views)):
                # Get the flags if needed
                flags = None
                if self.apply_flags:
                    if self.shared_flags is not None:
                        flags = np.array(views.shared[self.shared_flags][view])
                        flags &= self.shared_flag_mask
                    if self.det_flags is not None:
                        detflags = np.array(views.detdata[self.det_flags][view][det])
                        detflags &= self.det_flag_mask
                        if flags is not None:
                            flags |= detflags
                        else:
                            flags = detflags

                # Timestream of detector quaternions
                quats = views.detdata[self.detector_pointing.quats][view][det]
                if verbose:
                    timer.report_clear(f"get detector pointing for {det}")

                if flags is not None:
                    # Copy before blanking so the stored pointing is untouched.
                    quats = quats.copy()
                    quats[flags != 0] = nullquat
                    if verbose:
                        timer.report_clear(f"initialize flags for detector {det}")

                # Note on angles:
                # - psi_det is the angle of right-hand rotation about the line of sight
                #   from the local Southward meridian to the detector polarization
                #   orientation.
                # - psi_pol is the angle from the beam frame to detector polarization
                #   orientation also about the line of sight.
                # - psi_beam is the angle of right-hand rotation about the line of sight
                #   from the local Southward meridian to the beam frame.

                theta, phi, psi_det = qa.to_iso_angles(quats)

                psi_pol = self._get_psi_pol(focalplane, det)
                if self.dxx:
                    # Add angle between Dxx (focalplane) and Pxx
                    psi_pol += self._get_psi_uv(focalplane, det)

                # Beam orientation
                psi_beam = psi_det - psi_pol

                # Separately we store the psi_pol angle, so that we can recover the
                # angle relative to the local meridian when computing the weights.
                psi_pol = np.ones(psi_det.size) * psi_pol

                if self.hwp_angle is None:
                    det_hwp_angle = np.zeros_like(psi_det)
                else:
                    hwp_angle = views.shared[self.hwp_angle][view]
                    # The HWP angle in the detector frame is the angle in the
                    # focalplane frame minus the angle between the focalplane
                    # and detector frames
                    props = focalplane[det]
                    if "gamma" not in props.colnames:
                        msg = (
                            "When using a HWP, the focalplane 'gamma' column must exist"
                        )
                        raise RuntimeError(msg)
                    det_hwp_angle = hwp_angle - props["gamma"].to_value(u.radian)
                    psi_pol += 2 * det_hwp_angle
                all_hwp_angle.append(det_hwp_angle)
                all_theta.append(theta)
                all_phi.append(phi)
                all_psi_det.append(psi_det)
                all_psi_pol.append(psi_pol)
                all_psi_beam.append(psi_beam)

        if len(all_theta) > 0:
            all_theta = np.hstack(all_theta)
            all_phi = np.hstack(all_phi)
            all_psi_det = np.hstack(all_psi_det)
            all_psi_pol = np.hstack(all_psi_pol)
            all_psi_beam = np.hstack(all_psi_beam)
            all_hwp_angle = np.hstack(all_hwp_angle)
        else:
            # This process has no data for this detector.  Ensure that
            # we return an empty array, not a list
            all_theta = np.array(all_theta)
            all_phi = np.array(all_phi)
            all_psi_det = np.array(all_psi_det)
            all_psi_pol = np.array(all_psi_pol)
            all_psi_beam = np.array(all_psi_beam)
            all_hwp_angle = np.array(all_hwp_angle)

        if verbose:
            timer.report_clear(f"compute pointing angles for detector {det}")
        return all_theta, all_phi, all_psi_det, all_psi_pol, all_psi_beam, all_hwp_angle

    def get_buffer(self, theta, phi, psi, det, verbose):
        """Pack the pointing into the conviqt pointing array"""
        timer = Timer()
        timer.start()
        pnt = conviqt.Pointing(len(theta))
        if pnt._nrow > 0:
            # conviqt expects (phi, theta, psi) column order.
            arr = pnt.data()
            arr[:, 0] = phi
            arr[:, 1] = theta
            arr[:, 2] = psi
        if verbose:
            timer.report_clear(f"pack input array for detector {det}")
        return pnt

    def convolve(self, sky, beam, detector, pnt, det, verbose, pol=None):
        """Run the convolution and extract the signal column from `pnt`.

        Returns None when this process holds no samples for the detector.
        """
        timer = Timer()
        timer.start()
        if pol is None:
            pol = self.pol
        convolver = conviqt.Convolver(
            sky,
            beam,
            detector,
            pol,
            self.lmax,
            self.beammmax,
            self.order,
            self.verbosity,
            self.comm,
        )
        convolver.convolve(pnt)
        if verbose:
            timer.report_clear(f"convolve detector {det}")

        # The pointer to the data will have changed during
        # the convolution call ...

        if pnt._nrow > 0:
            arr = pnt.data()
            convolved_data = arr[:, 3].astype(np.float64)
        else:
            convolved_data = None
        if verbose:
            timer.report_clear(f"extract convolved data for {det}")

        del convolver

        return convolved_data

    def calibrate_signal(self, data, det, beam, convolved_data, verbose):
        """By default, libConviqt results returns a signal that conforms to
        TOD = (1 + epsilon) / 2 * intensity + (1 - epsilon) / 2 * polarization.

        When calibrate = True, we rescale the TOD to
        TOD = intensity + (1 - epsilon) / (1 + epsilon) * polarization
        """
        if not self.calibrate or beam.normalized():
            return
        timer = Timer()
        timer.start()
        offset = 0
        for obs in data.obs:
            if det not in obs.local_detectors:
                continue
            focalplane = obs.telescope.focalplane
            epsilon = self._get_epsilon(focalplane, det)
            # Make sure detector data output exists
            exists = obs.detdata.ensure(
                self.det_data, detectors=[det], create_units=self.units
            )
            # Loop over views
            views = obs.view[self.view]
            for view in views.detdata[self.det_data]:
                nsample = len(view[det])
                convolved_data[offset : offset + nsample] *= 2 / (1 + epsilon)
                offset += nsample
        if verbose:
            timer.report_clear(f"calibrate detector {det}")
        return

    def save(self, data, det, convolved_data, verbose):
        """Store the convolved data."""
        timer = Timer()
        timer.start()
        offset = 0
        # The convolved data is in Kelvin; convert to the output units.
        scale = unit_conversion(u.K, self.units)
        for obs in data.obs:
            if det not in obs.local_detectors:
                continue
            # Loop over views
            views = obs.view[self.view]
            for view in views.detdata[self.det_data]:
                nsample = len(view[det])
                view[det] += scale * convolved_data[offset : offset + nsample]
                offset += nsample
        if verbose:
            timer.report_clear(f"save detector {det}")
        return

    def _finalize(self, data, **kwargs):
        # No reduction or cleanup is needed after execution.
        return

    def _requires(self):
        """Report the observation data consumed by this operator.

        FIX: the original body referenced self.pixel_dist, self.covariance,
        self.noise_model and self.boresight, none of which are traits of
        this class (apparently copied from a map-making operator); calling
        it would fail on the missing attributes.  Only the inputs this
        operator actually reads are reported now.
        """
        req = self.detector_pointing.requires()
        if "shared" not in req:
            req["shared"] = list()
        if "detdata" not in req:
            req["detdata"] = list()
        if "intervals" not in req:
            req["intervals"] = list()
        req["detdata"].append(self.det_data)
        if self.shared_flags is not None:
            req["shared"].append(self.shared_flags)
        if self.det_flags is not None:
            req["detdata"].append(self.det_flags)
        if self.view is not None:
            req["intervals"].append(self.view)
        return req

    def _provides(self):
        """Report the data products created by this operator."""
        prov = self.detector_pointing.provides()
        prov["detdata"].append(self.det_data)
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

apply_flags = Bool(False, allow_none=False, help='Only synthesize signal for unflagged samples.') class-attribute instance-attribute

available property

Return True if libconviqt is found in the library search path.

beam_file = Unicode(None, allow_none=True, help='File containing the beam a_lm expansion. Tag {detector} will be replaced with the detector name.') class-attribute instance-attribute

beam_file_dict = Dict({}, help='Dictionary of files containing the beam a_lm expansions. An entry for each detector name must be present. If provided, supersedes `beam_file`.') class-attribute instance-attribute

beammmax = Int(-1, allow_none=False, help='Beam maximum m. Actual resolution in the Healpix FITS file may differ. If not set, will use the maximum expansion order from file.') class-attribute instance-attribute

calibrate = Bool(True, allow_none=False, help='Calibrate intensity to 1.0, rather than (1 + epsilon) / 2. Calibrate has no effect if the beam is found to be normalized rather than scaled with the leakage factor.') class-attribute instance-attribute

comm = Instance(klass=MPI_Comm, allow_none=True, help='MPI communicator to use for the convolution. libConviqt does not work without MPI.') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, allow_none=False, help='Observation detdata key for accumulating convolved timestreams') class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

det_flag_mask = Int(defaults.det_mask_invalid, help='Bit mask value for detector sample flagging') class-attribute instance-attribute

det_flags = Unicode(defaults.det_flags, allow_none=True, help='Observation detdata key for flags to use') class-attribute instance-attribute

det_mask = Int(defaults.det_mask_invalid, help='Bit mask value for per-detector flagging') class-attribute instance-attribute

detector_pointing = Instance(klass=Operator, allow_none=True, help='Operator that translates boresight pointing into detector frame') class-attribute instance-attribute

dxx = Bool(True, allow_none=False, help='The beam frame is either Dxx or Pxx. Pxx includes the rotation to polarization sensitive basis, Dxx does not. When Dxx=True, detector orientation from attitude quaternions is corrected for the polarization angle.') class-attribute instance-attribute

fwhm = Quantity(4.0 * u.arcmin, allow_none=False, help='Width of a symmetric gaussian beam already present in the skyfile (will be deconvolved away).') class-attribute instance-attribute

hwp_angle = Unicode(None, allow_none=True, help='Observation shared key for HWP angle') class-attribute instance-attribute

lmax = Int(-1, allow_none=False, help='Maximum ell (and m). Actual resolution in the Healpix FITS file may differ. If not set, will use the maximum expansion order from file.') class-attribute instance-attribute

mc = Int(None, allow_none=True, help='Monte Carlo index used in synthesizing the input file names.') class-attribute instance-attribute

normalize_beam = Bool(False, allow_none=False, help='Normalize beam to have unit response to temperature monopole.') class-attribute instance-attribute

order = Int(13, allow_none=False, help='Conviqt order parameter (expert mode)') class-attribute instance-attribute

pol = Bool(True, allow_none=False, help='Toggle simulated signal polarization') class-attribute instance-attribute

remove_dipole = Bool(False, allow_none=False, help='Suppress the temperature dipole in sky_file.') class-attribute instance-attribute

remove_monopole = Bool(False, allow_none=False, help='Suppress the temperature monopole in sky_file.') class-attribute instance-attribute

shared_flag_mask = Int(defaults.shared_mask_invalid, help='Bit mask value for optional flagging') class-attribute instance-attribute

shared_flags = Unicode(defaults.shared_flags, allow_none=True, help='Observation shared key for telescope flags to use') class-attribute instance-attribute

sky_file = Unicode(None, allow_none=True, help='File containing the sky a_lm expansion. Tag {detector} will be replaced with the detector name') class-attribute instance-attribute

sky_file_dict = Dict({}, help='Dictionary of files containing the sky a_lm expansions. An entry for each detector name must be present. If provided, supersedes `sky_file`.') class-attribute instance-attribute

verbosity = Int(0, allow_none=False, help='') class-attribute instance-attribute

view = Unicode(None, allow_none=True, help='Use this view of the data in all observations') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/conviqt.py
237
238
239
def __init__(self, **kwargs):
    """Construct the operator, forwarding all trait values to the base class."""
    super().__init__(**kwargs)

_check_det_flag_mask(proposal)

Source code in toast/ops/conviqt.py
230
231
232
233
234
235
@traitlets.validate("det_flag_mask")
def _check_det_flag_mask(self, proposal):
    """Trait validator: reject negative det_flag_mask values."""
    value = proposal["value"]
    if value >= 0:
        return value
    raise traitlets.TraitError("Det flag mask should be a positive integer")

_check_det_mask(proposal)

Source code in toast/ops/conviqt.py
216
217
218
219
220
221
@traitlets.validate("det_mask")
def _check_det_mask(self, proposal):
    """Trait validator: reject negative det_mask values."""
    value = proposal["value"]
    if value >= 0:
        return value
    raise traitlets.TraitError("Det mask should be a positive integer")

_check_mc(proposal)

Source code in toast/ops/conviqt.py
126
127
128
129
130
131
@traitlets.validate("mc")
def _check_mc(self, proposal):
    """Trait validator: the Monte Carlo index may be None or non-negative."""
    value = proposal["value"]
    if value is None or value >= 0:
        return value
    raise traitlets.TraitError("MC index cannot be negative")

_check_shared_flag_mask(proposal)

Source code in toast/ops/conviqt.py
223
224
225
226
227
228
@traitlets.validate("shared_flag_mask")
def _check_shared_flag_mask(self, proposal):
    """Trait validator: reject negative shared_flag_mask values."""
    value = proposal["value"]
    if value >= 0:
        return value
    raise traitlets.TraitError("Shared flag mask should be a positive integer")

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/conviqt.py
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Convolve the sky with the beam for every selected detector.

    Args:
        data:  The distributed toast Data object to process.
        detectors (list):  Optional list of detector names to restrict to.

    Raises:
        RuntimeError:  If libconviqt or MPI are unavailable, if no
            detector pointing operator is configured, or if an HWP
            angle is set (this operator only handles static beams).
    """
    if not self.available:
        raise RuntimeError("libconviqt is not available")

    if self.comm is None:
        raise RuntimeError("libconviqt requires MPI")

    if self.detector_pointing is None:
        raise RuntimeError("detector_pointing cannot be None.")

    if self.hwp_angle is not None:
        raise RuntimeError("Standard conviqt operator cannot handle HWP angle")

    log = Logger.get()

    timer = Timer()
    timer.start()

    # Output units: reuse existing detector data units when present,
    # otherwise fall back to the configured default units.
    self.units = data.detector_units(self.det_data)
    if self.units is None:
        # This means that the data does not yet exist
        self.units = self.det_data_units

    all_detectors = self._get_all_detectors(data, detectors)

    for det in all_detectors:
        # Timing reports only from rank zero, and only when requested.
        verbose = self.comm.rank == 0 and self.verbosity > 0

        # Expand detector pointing
        self.detector_pointing.apply(data, detectors=[det])

        # An explicit per-detector entry in sky_file_dict supersedes the
        # formatted sky_file name pattern.
        if det in self.sky_file_dict:
            sky_file = self.sky_file_dict[det]
        else:
            sky_file = self.sky_file.format(detector=det, mc=self.mc)
        sky = self.get_sky(sky_file, det, verbose)

        if det in self.beam_file_dict:
            beam_file = self.beam_file_dict[det]
        else:
            beam_file = self.beam_file.format(detector=det, mc=self.mc)

        beam = self.get_beam(beam_file, det, verbose)

        detector = self.get_detector(det)

        theta, phi, psi_det, psi_pol, psi_beam, hwp_angle = self.get_pointing(
            data, det, verbose
        )

        # Only the beam-frame angle is used for the static-beam convolution.
        pnt = self.get_buffer(theta, phi, psi_beam, det, verbose)
        del theta, phi, psi_det, psi_pol, psi_beam, hwp_angle

        convolved_data = self.convolve(sky, beam, detector, pnt, det, verbose)

        self.calibrate_signal(data, det, beam, convolved_data, verbose)
        self.save(data, det, convolved_data, verbose)

        # Release the conviqt objects before the next detector iteration.
        del pnt, detector, beam, sky

        if verbose:
            timer.report_clear(f"conviqt process detector {det}")

    return

_finalize(data, **kwargs)

Source code in toast/ops/conviqt.py
648
649
def _finalize(self, data, **kwargs):
    return

_get_all_detectors(data, detectors)

Assemble a list of detectors across all processes and observations in self._comm.

Source code in toast/ops/conviqt.py
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
def _get_all_detectors(self, data, detectors):
    """Assemble a list of detectors across all processes and
    observations in `self._comm`.

    Also ensures the output detector data object exists in every local
    observation.  Returns the same sorted list of detector names on
    every process so that all ranks iterate detectors in lockstep.
    """
    my_dets = set()
    for obs in data.obs:
        # Get the detectors we are using for this observation
        obs_dets = obs.select_local_detectors(detectors, flagmask=self.det_mask)
        for det in obs_dets:
            my_dets.add(det)
        # Make sure detector data output exists
        exists = obs.detdata.ensure(
            self.det_data, detectors=detectors, create_units=self.units
        )
    # Collect every rank's detector set on rank 0, merge and sort there,
    # then broadcast so all ranks share an identical ordered list.
    all_dets = self.comm.gather(my_dets, root=0)
    if self.comm.rank == 0:
        for some_dets in all_dets:
            my_dets.update(some_dets)
        my_dets = sorted(my_dets)
    all_dets = self.comm.bcast(my_dets, root=0)
    return all_dets

_get_epsilon(focalplane, det)

Parse polarization leakage (epsilon) from the focalplane object or dictionary.

Source code in toast/ops/conviqt.py
382
383
384
385
386
387
388
389
390
391
392
393
394
def _get_epsilon(self, focalplane, det):
    """Parse polarization leakage (epsilon) from the focalplane
    object or dictionary.
    """
    if det not in focalplane:
        raise RuntimeError(f"focalplane does not include {det}")
    props = focalplane[det]
    if "pol_leakage" in props.colnames:
        epsilon = focalplane[det]["pol_leakage"]
    else:
        # Assume zero polarization leakage
        epsilon = 0
    return epsilon

_get_psi_pol(focalplane, det)

Parse polarization angle in radians from the focalplane dictionary. The angle is relative to the Pxx basis.

Source code in toast/ops/conviqt.py
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
def _get_psi_pol(self, focalplane, det):
    """Parse polarization angle in radians from the focalplane
    dictionary.  The angle is relative to the Pxx basis.
    """
    if det not in focalplane:
        raise RuntimeError(f"focalplane does not include {det}")
    props = focalplane[det]
    if "pol" in props.colnames:
        pol = props["pol"]
    else:
        pol = None
    if pol == "A" or det.endswith(("a", "A", "t", "T")):
        return 0.0
    elif pol == "B" or det.endswith(("b", "B")):
        return np.pi / 2
    # Only if the polarization type is not recorded, will we look for
    # polarization angle in the focalplane
    if "psi_pol" in props.colnames:
        psi_pol = props["psi_pol"].to_value(u.radian)
    elif "pol_angle" in props.colnames:
        warnings.warn(
            "Use psi_pol and psi_uv rather than pol_angle", DeprecationWarning
        )
        psi_pol = props["pol_angle"].to_value(u.radian)
    else:
        raise RuntimeError(f"focalplane[{det}] does not include psi_pol")
    return psi_pol

_get_psi_uv(focalplane, det)

Parse Pxx basis angle in radians from the focalplane dictionary. The angle is measured from Dxx to Pxx basis.

Source code in toast/ops/conviqt.py
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
def _get_psi_uv(self, focalplane, det):
    """Parse Pxx basis angle in radians from the focalplane
    dictionary.  The angle is measured from Dxx to Pxx basis.
    """
    if det not in focalplane:
        raise RuntimeError(f"focalplane does not include {det}")
    props = focalplane[det]
    if "psi_uv_deg" in props.colnames:
        psi_uv = props["psi_uv"].to_value(u.radian)
    else:
        msg = f"focalplane[{det}] does not include 'psi_uv'. "
        msg += f"Valid column names are {props.colnames}"
        warnings.warn(msg)
        psi_uv = 0
    return psi_uv

_provides()

Source code in toast/ops/conviqt.py
667
668
669
670
def _provides(self):
    prov = self.detector_pointing.provides()
    prov["detdata"].append(self.det_data)
    return prov

_requires()

Source code in toast/ops/conviqt.py
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
def _requires(self):
    req = self.detector_pointing.requires()
    req["global"].extend([self.pixel_dist, self.covariance])
    req["meta"].extend([self.noise_model])
    req["shared"] = [self.boresight]
    if "detdata" not in req:
        req["detdata"] = list()
    req["detdata"].append(self.det_data)
    if self.shared_flags is not None:
        req["shared"].append(self.shared_flags)
    if self.det_flags is not None:
        req["detdata"].append(self.det_flags)
    if self.view is not None:
        req["intervals"].append(self.view)
    return req

calibrate_signal(data, det, beam, convolved_data, verbose)

By default, libConviqt returns a signal that conforms to TOD = (1 + epsilon) / 2 * intensity + (1 - epsilon) / 2 * polarization.

When calibrate = True, we rescale the TOD to TOD = intensity + (1 - epsilon) / (1 + epsilon) * polarization

Source code in toast/ops/conviqt.py
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
def calibrate_signal(self, data, det, beam, convolved_data, verbose):
    """Rescale the convolved TOD in place to calibrated units.

    By default, libConviqt returns a signal that conforms to
    TOD = (1 + epsilon) / 2 * intensity + (1 - epsilon) / 2 * polarization.

    When calibrate = True, we rescale the TOD to
    TOD = intensity + (1 - epsilon) / (1 + epsilon) * polarization

    No-op when calibration is disabled or the beam is already normalized.
    """
    if not self.calibrate or beam.normalized():
        return
    timer = Timer()
    timer.start()
    # `offset` walks the flat convolved_data buffer in the same
    # observation/view order used when the pointing was assembled.
    offset = 0
    for obs in data.obs:
        if det not in obs.local_detectors:
            continue
        focalplane = obs.telescope.focalplane
        epsilon = self._get_epsilon(focalplane, det)
        # Make sure detector data output exists
        exists = obs.detdata.ensure(
            self.det_data, detectors=[det], create_units=self.units
        )
        # Loop over views
        views = obs.view[self.view]
        for view in views.detdata[self.det_data]:
            nsample = len(view[det])
            convolved_data[offset : offset + nsample] *= 2 / (1 + epsilon)
            offset += nsample
    if verbose:
        timer.report_clear(f"calibrate detector {det}")
    return

convolve(sky, beam, detector, pnt, det, verbose, pol=None)

Source code in toast/ops/conviqt.py
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
def convolve(self, sky, beam, detector, pnt, det, verbose, pol=None):
    """Run the conviqt convolution for one detector.

    Returns the convolved samples (column 3 of the pointing buffer) as
    float64, or None when this process holds no samples.
    """
    timer = Timer()
    timer.start()
    if pol is None:
        pol = self.pol
    convolver = conviqt.Convolver(
        sky,
        beam,
        detector,
        pol,
        self.lmax,
        self.beammmax,
        self.order,
        self.verbosity,
        self.comm,
    )
    convolver.convolve(pnt)
    if verbose:
        timer.report_clear(f"convolve detector {det}")

    # The underlying buffer may have been reallocated during the
    # convolution call, so fetch the data array again before reading
    # the result column.
    convolved_data = None
    if pnt._nrow > 0:
        convolved_data = pnt.data()[:, 3].astype(np.float64)
    if verbose:
        timer.report_clear(f"extract convolved data for {det}")

    del convolver

    return convolved_data

get_beam(beamfile, det, verbose, pol=None)

Source code in toast/ops/conviqt.py
416
417
418
419
420
421
422
423
424
425
426
def get_beam(self, beamfile, det, verbose, pol=None):
    """Load the conviqt beam expansion for `det`, optionally normalizing
    it to unit monopole response."""
    timer = Timer()
    timer.start()
    use_pol = self.pol if pol is None else pol
    beam = conviqt.Beam(self.lmax, self.beammmax, use_pol, beamfile, self.comm)
    if self.normalize_beam:
        beam.normalize()
    if verbose:
        timer.report_clear(f"initialize beam for detector {det}")
    return beam

get_buffer(theta, phi, psi, det, verbose)

Pack the pointing into the conviqt pointing array

Source code in toast/ops/conviqt.py
549
550
551
552
553
554
555
556
557
558
559
560
561
def get_buffer(self, theta, phi, psi, det, verbose):
    """Pack the pointing angles into a conviqt pointing buffer."""
    timer = Timer()
    timer.start()
    pnt = conviqt.Pointing(len(theta))
    # Conviqt expects the (phi, theta, psi) column ordering.
    if pnt._nrow > 0:
        buf = pnt.data()
        buf[:, 0] = phi
        buf[:, 1] = theta
        buf[:, 2] = psi
    if verbose:
        timer.report_clear(f"pack input array for detector {det}")
    return pnt

get_detector(det)

We always create the detector with zero leakage and scale the returned TOD ourselves

Source code in toast/ops/conviqt.py
428
429
430
431
432
433
def get_detector(self, det):
    """Build a conviqt detector object.

    The detector is always created with zero polarization leakage; the
    returned TOD is rescaled separately in calibrate_signal().
    """
    return conviqt.Detector(name=det, epsilon=0)

get_pointing(data, det, verbose)

Return the detector pointing as ZYZ Euler angles without the polarization sensitive angle. These angles are to be compatible with Pxx or Dxx frame beam products

Source code in toast/ops/conviqt.py
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
def get_pointing(self, data, det, verbose):
    """Return the detector pointing as ZYZ Euler angles without the
    polarization sensitive angle.  These angles are to be compatible
    with Pxx or Dxx frame beam products.

    Returns:
        (tuple):  Flat arrays (theta, phi, psi_det, psi_pol, psi_beam,
            hwp_angle), concatenated over all local observations and
            views for this detector.  Empty arrays when this process
            holds no data for the detector.
    """
    # We need the three pointing angles to describe the
    # pointing.  local_pointing() returns the attitude quaternions.
    nullquat = np.array([0, 0, 0, 1], dtype=np.float64)
    timer = Timer()
    timer.start()
    all_theta = []
    all_phi = []
    all_psi_det = []
    all_psi_pol = []
    all_psi_beam = []
    all_hwp_angle = []
    for obs in data.obs:
        if det not in obs.local_detectors:
            continue
        focalplane = obs.telescope.focalplane
        # Loop over views
        views = obs.view[self.view]
        for view in range(len(views)):
            # Get the flags if needed.  Shared and per-detector flags are
            # masked separately, then OR-ed together.
            flags = None
            if self.apply_flags:
                if self.shared_flags is not None:
                    flags = np.array(views.shared[self.shared_flags][view])
                    flags &= self.shared_flag_mask
                if self.det_flags is not None:
                    detflags = np.array(views.detdata[self.det_flags][view][det])
                    detflags &= self.det_flag_mask
                    if flags is not None:
                        flags |= detflags
                    else:
                        flags = detflags

            # Timestream of detector quaternions
            quats = views.detdata[self.detector_pointing.quats][view][det]
            if verbose:
                timer.report_clear(f"get detector pointing for {det}")

            # Flagged samples are replaced with the null quaternion so they
            # contribute harmless (identity) pointing.  Copy first to avoid
            # mutating the cached pointing data.
            if flags is not None:
                quats = quats.copy()
                quats[flags != 0] = nullquat
                if verbose:
                    timer.report_clear(f"initialize flags for detector {det}")

            # Note on angles:
            # - psi_det is the angle of right-hand rotation about the line of sight
            #   from the local Southward meridian to the detector polarization
            #   orientation.
            # - psi_pol is the angle from the beam frame to detector polarization
            #   orientation also about the line of sight.
            # - psi_beam is the angle of right-hand rotation about the line of sight
            #   from the local Southward meridian to the beam frame.

            theta, phi, psi_det = qa.to_iso_angles(quats)

            psi_pol = self._get_psi_pol(focalplane, det)
            if self.dxx:
                # Add angle between Dxx (focalplane) and Pxx
                psi_pol += self._get_psi_uv(focalplane, det)

            # Beam orientation
            psi_beam = psi_det - psi_pol

            # Separately we store the psi_pol angle, so that we can recover the
            # angle relative to the local meridian when computing the weights.
            psi_pol = np.ones(psi_det.size) * psi_pol

            if self.hwp_angle is None:
                det_hwp_angle = np.zeros_like(psi_det)
            else:
                hwp_angle = views.shared[self.hwp_angle][view]
                # The HWP angle in the detector frame is the angle in the
                # focalplane frame minus the angle between the focalplane
                # and detector frames
                props = focalplane[det]
                if "gamma" not in props.colnames:
                    msg = (
                        "When using a HWP, the focalplane 'gamma' column must exist"
                    )
                    raise RuntimeError(msg)
                det_hwp_angle = hwp_angle - props["gamma"].to_value(u.radian)
                psi_pol += 2 * det_hwp_angle
            all_hwp_angle.append(det_hwp_angle)
            all_theta.append(theta)
            all_phi.append(phi)
            all_psi_det.append(psi_det)
            all_psi_pol.append(psi_pol)
            all_psi_beam.append(psi_beam)

    if len(all_theta) > 0:
        all_theta = np.hstack(all_theta)
        all_phi = np.hstack(all_phi)
        all_psi_det = np.hstack(all_psi_det)
        all_psi_pol = np.hstack(all_psi_pol)
        all_psi_beam = np.hstack(all_psi_beam)
        all_hwp_angle = np.hstack(all_hwp_angle)
    else:
        # This process has no data for this detector.  Ensure that
        # we return an empty array, not a list
        all_theta = np.array(all_theta)
        all_phi = np.array(all_phi)
        all_psi_det = np.array(all_psi_det)
        all_psi_pol = np.array(all_psi_pol)
        all_psi_beam = np.array(all_psi_beam)
        all_hwp_angle = np.array(all_hwp_angle)

    if verbose:
        timer.report_clear(f"compute pointing angles for detector {det}")
    return all_theta, all_phi, all_psi_det, all_psi_pol, all_psi_beam, all_hwp_angle

get_sky(skyfile, det, verbose, pol=None)

Source code in toast/ops/conviqt.py
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
def get_sky(self, skyfile, det, verbose, pol=None):
    """Load the sky a_lm expansion for `det`, smoothing by `fwhm` and
    optionally removing the temperature monopole and/or dipole."""
    timer = Timer()
    timer.start()
    use_pol = self.pol if pol is None else pol
    sky = conviqt.Sky(
        self.lmax,
        use_pol,
        skyfile,
        self.fwhm.to_value(u.arcmin),
        self.comm,
    )
    if self.remove_monopole:
        sky.remove_monopole()
    if self.remove_dipole:
        sky.remove_dipole()
    if verbose:
        timer.report_clear(f"initialize sky for detector {det}")
    return sky

save(data, det, convolved_data, verbose)

Store the convolved data.

Source code in toast/ops/conviqt.py
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
def save(self, data, det, convolved_data, verbose):
    """Accumulate the convolved samples into the detector data,
    converting from Kelvin to the output units."""
    timer = Timer()
    timer.start()
    scale = unit_conversion(u.K, self.units)
    # Consume the flat convolved_data buffer view by view, in the same
    # observation/view order used when the pointing was assembled.
    offset = 0
    for obs in data.obs:
        if det not in obs.local_detectors:
            continue
        for view in obs.view[self.view].detdata[self.det_data]:
            nsample = len(view[det])
            view[det] += scale * convolved_data[offset : offset + nsample]
            offset += nsample
    if verbose:
        timer.report_clear(f"save detector {det}")
    return

toast.ops.SimTEBConviqt

Bases: SimConviqt

Operator that uses libconviqt to generate beam-convolved timestreams. This operator should be used in the presence of a spinning HWP, which makes the beam time-dependent, constantly mapping the co- and cross-polar responses on to each other. In the parent class SimConviqt we assume the beam to be static.

The convolution is performed by coupling each IQU component of the signal properly as: :math:skyT_lm * beamT_lm, skyE_lm * Re{P}, skyB_lm * Im{P}. FIXME : check above math

For extra details please refer to this note

Source code in toast/ops/conviqt.py
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
class SimTEBConviqt(SimConviqt):
    """
    Operator that uses libconviqt to generate beam-convolved timestreams.
    This operator should be used in the presence of a spinning HWP, which
    makes the beam time-dependent, constantly mapping the co- and
    cross-polar responses on to each other.  In the parent class
    SimConviqt we assume the beam to be static.

    The convolution is performed by coupling each IQU component of the
    signal properly as:
    :math:`skyT_lm * beamT_lm, skyE_lm * Re{P}, skyB_lm * Im{P}`.
    FIXME : check above math

    For extra details please refer to
    [this note](https://giuspugl.github.io/reports/Notes_TEB_convolution.html)
    """

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        if not self.available:
            raise RuntimeError("libconviqt is not available")

        if self.comm is None:
            raise RuntimeError("libconviqt requires MPI")

        if self.detector_pointing is None:
            raise RuntimeError("detector_pointing cannot be None.")

        log = Logger.get()

        timer = Timer()
        timer.start()

        self.units = data.detector_units(self.det_data)
        if self.units is None:
            # This means that the data does not yet exist
            self.units = self.det_data_units

        # Expand detector pointing
        self.detector_pointing.apply(data, detectors=detectors)

        all_detectors = self._get_all_detectors(data, detectors)

        for det in all_detectors:
            verbose = self.comm.rank == 0 and self.verbosity > 0

            # Find one process that has focalplane data for this detector
            # and broadcast the focalplane properties

            for obs in data.obs:
                focalplane = obs.telescope.focalplane
                have_det = det in focalplane
                if have_det:
                    break
            have_det_comm = self.comm.allgather(have_det)
            source = np.argwhere(have_det_comm).ravel()[0]
            if self.comm.rank == source:
                det_dict = {}
                for key in focalplane[det].colnames:
                    det_dict[key] = focalplane[det][key]
                det_dict["detector"] = det
                det_dict["mc"] = self.mc
            else:
                det_dict = None
            det_dict = self.comm.bcast(det_dict, root=source)

            # Expand detector pointing
            self.detector_pointing.apply(data, detectors=[det])

            # Explicit per-detector file entries supersede the formatted
            # file name patterns.
            if det in self.sky_file_dict:
                sky_file = self.sky_file_dict[det]
            else:
                sky_file = self.sky_file.format(**det_dict)

            skyT, skyEB, skyBE = self.get_TEB_sky(sky_file, det, verbose)

            if det in self.beam_file_dict:
                beam_file = self.beam_file_dict[det]
            else:
                beam_file = self.beam_file.format(**det_dict)

            beam_T, beam_P = self.get_TP_beam(beam_file, det, verbose)

            detector = self.get_detector(det)

            theta, phi, psi_det, psi_pol, psi_beam, hwp_angle = self.get_pointing(
                data, det, verbose
            )

            # T-convolution
            pnt = self.get_buffer(theta, phi, psi_beam, det, verbose)

            convolved_data = self.convolve(
                skyT, beam_T, detector, pnt, det, verbose, pol=False
            )

            if self.pol:
                del pnt
                # The polarized response is modulated at four times the
                # detector-frame HWP angle.
                angle_arg = 4.0 * hwp_angle
                # EB-convolution
                pnt = self.get_buffer(theta, phi, psi_beam, det, verbose)
                convolved_data += np.cos(angle_arg) * self.convolve(
                    skyEB, beam_P, detector, pnt, det, verbose, pol=True
                )
                del pnt
                # BE-convolution
                pnt = self.get_buffer(theta, phi, psi_beam, det, verbose)
                convolved_data += np.sin(angle_arg) * self.convolve(
                    skyBE, beam_P, detector, pnt, det, verbose, pol=True
                )

            del skyEB, skyBE

            del theta, phi, psi_det, psi_pol, psi_beam, hwp_angle

            self.calibrate_signal(
                data,
                det,
                beam_T,
                convolved_data,
                verbose,
            )
            self.save(data, det, convolved_data, verbose)

            del pnt, detector, beam_T, beam_P, skyT

            if verbose:
                timer.report_clear(f"conviqt process detector {det}")

        return

    def get_TEB_sky(self, skyfile, det, verbose):
        """Load the T, E+B and B-E sky expansions used by the
        HWP-modulated convolution.

        If `skyfile` exists, it must contain the T/E/B a_lm in HDUs 1-3;
        the EB and BE combinations are written to temporary files so
        libconviqt can load them.  Otherwise, pre-built component files
        (`*_T.fits`, `*_EB.fits`, `*_BE.fits`) must exist on disk.
        """
        if os.path.isfile(skyfile):
            skyT = self.get_sky(skyfile, det, verbose, pol=False)
            # generate temporary files to use libconviqt facilities
            slmE = hp.read_alm(skyfile, hdu=2)
            slmB = hp.read_alm(skyfile, hdu=3)
            with tempfile.TemporaryDirectory() as tempdir:
                fname_temp = os.path.join(tempdir, "slm.fits")
                hp.write_alm(
                    fname_temp,
                    np.vstack([slmE * 0, slmE, slmB]),
                    lmax=self.lmax,
                    overwrite=True,
                )
                skyEB = self.get_sky(fname_temp, det, verbose, pol=True)
                hp.write_alm(
                    fname_temp,
                    np.vstack([slmE * 0, slmB, -slmE]),
                    lmax=self.lmax,
                    overwrite=True,
                )
                skyBE = self.get_sky(fname_temp, det, verbose, pol=True)
            del slmE, slmB
        else:
            # Assume the component files are on disk
            skyfile_T = skyfile.replace(".fits", "_T.fits")
            skyfile_EB = skyfile.replace(".fits", "_EB.fits")
            skyfile_BE = skyfile.replace(".fits", "_BE.fits")
            for fname in skyfile_T, skyfile_EB, skyfile_BE:
                if not os.path.isfile(fname):
                    msg = f"No TEB sky at {skyfile} and no component sky at {fname}"
                    raise RuntimeError(msg)
            skyT = self.get_sky(skyfile_T, det, verbose, pol=False)
            skyEB = self.get_sky(skyfile_EB, det, verbose, pol=True)
            skyBE = self.get_sky(skyfile_BE, det, verbose, pol=True)

        return skyT, skyEB, skyBE

    def get_TP_beam(self, beamfile, det, verbose):
        """Load the temperature-only and polarized beam expansions.

        If `beamfile` exists, it provides the T beam directly and the E/B
        components (HDUs 2-3) are repacked into a temporary polarized beam
        file.  Otherwise the `*_T.fits` / `*_P.fits` component files are
        loaded directly.
        """
        timer = Timer()
        timer.start()
        if os.path.isfile(beamfile):
            beamT = conviqt.Beam(
                lmax=self.lmax,
                mmax=self.beammmax,
                pol=False,
                beamfile=beamfile,
                comm=self.comm,
            )
            # generate temporary files to use libconviqt facilities
            blmE, mmaxE = hp.read_alm(beamfile, hdu=2, return_mmax=True)
            blmB, mmaxB = hp.read_alm(beamfile, hdu=3, return_mmax=True)
            if mmaxE != mmaxB:
                # BUGFIX: the message previously misspelled "mmaxE" as "mmatE".
                msg = f"Mismatch: mmaxE={mmaxE}, mmaxB={mmaxB}"
                raise RuntimeError(msg)
            with tempfile.TemporaryDirectory() as tempdir:
                fname_temp = os.path.join(tempdir, "blm.fits")
                hp.write_alm(
                    fname_temp,
                    np.vstack([blmE * 0, blmE, blmB]),
                    lmax=self.lmax,
                    mmax_in=mmaxE,
                    overwrite=True,
                )
                beamP = conviqt.Beam(
                    lmax=self.lmax,
                    mmax=self.beammmax,
                    pol=True,
                    beamfile=fname_temp,
                    comm=self.comm,
                )
            del blmE, blmB
        else:
            beam_file_T = beamfile.replace(".fits", "_T.fits")
            beamT = conviqt.Beam(
                lmax=self.lmax,
                mmax=self.beammmax,
                pol=False,
                beamfile=beam_file_T,
                comm=self.comm,
            )
            beam_file_P = beamfile.replace(".fits", "_P.fits")
            beamP = conviqt.Beam(
                lmax=self.lmax,
                mmax=self.beammmax,
                pol=True,
                beamfile=beam_file_P,
                comm=self.comm,
            )

        if verbose:
            timer.report_clear(f"initialize beam for detector {det}")
        return beamT, beamP

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/conviqt.py
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Convolve per-detector beams with the sky and accumulate timestreams.

    For every detector across all processes/observations, this loads the
    T, EB and BE sky expansions and the T/P beams, runs the libconviqt
    convolutions, and accumulates the calibrated result into
    ``self.det_data``.  Requires MPI and libconviqt.

    Args:
        data (toast.Data): The distributed data.
        detectors (list): Optional list of detectors to process.

    Raises:
        RuntimeError: If libconviqt, MPI, or detector pointing is missing.
    """
    if not self.available:
        raise RuntimeError("libconviqt is not available")

    if self.comm is None:
        raise RuntimeError("libconviqt requires MPI")

    if self.detector_pointing is None:
        raise RuntimeError("detector_pointing cannot be None.")

    log = Logger.get()

    timer = Timer()
    timer.start()

    self.units = data.detector_units(self.det_data)
    if self.units is None:
        # This means that the data does not yet exist
        self.units = self.det_data_units

    # Expand detector pointing
    self.detector_pointing.apply(data, detectors=detectors)

    all_detectors = self._get_all_detectors(data, detectors)

    for det in all_detectors:
        # Only rank 0 reports timing, and only when requested
        verbose = self.comm.rank == 0 and self.verbosity > 0

        # Find one process that has focalplane data for this detector
        # and broadcast the focalplane properties

        for obs in data.obs:
            focalplane = obs.telescope.focalplane
            have_det = det in focalplane
            if have_det:
                break
        # The lowest rank that has the detector acts as broadcast root
        have_det_comm = self.comm.allgather(have_det)
        source = np.argwhere(have_det_comm).ravel()[0]
        if self.comm.rank == source:
            det_dict = {}
            for key in focalplane[det].colnames:
                det_dict[key] = focalplane[det][key]
            # Extra keys made available for file name pattern formatting
            det_dict["detector"] = det
            det_dict["mc"] = self.mc
        else:
            det_dict = None
        det_dict = self.comm.bcast(det_dict, root=source)

        # Expand detector pointing
        self.detector_pointing.apply(data, detectors=[det])

        # Explicit per-detector file overrides take precedence over the
        # file name pattern.
        if det in self.sky_file_dict:
            sky_file = self.sky_file_dict[det]
        else:
            sky_file = self.sky_file.format(**det_dict)

        skyT, skyEB, skyBE = self.get_TEB_sky(sky_file, det, verbose)

        if det in self.beam_file_dict:
            beam_file = self.beam_file_dict[det]
        else:
            beam_file = self.beam_file.format(**det_dict)

        beam_T, beam_P = self.get_TP_beam(beam_file, det, verbose)

        detector = self.get_detector(det)

        theta, phi, psi_det, psi_pol, psi_beam, hwp_angle = self.get_pointing(
            data, det, verbose
        )

        # T-convolution
        pnt = self.get_buffer(theta, phi, psi_beam, det, verbose)

        convolved_data = self.convolve(
            skyT, beam_T, detector, pnt, det, verbose, pol=False
        )

        if self.pol:
            del (pnt,)
            # The polarized terms are modulated at four times the HWP
            # angle; EB and BE parts are weighted by cos/sin respectively.
            angle_arg = 4.0 * hwp_angle
            # EB-convolution
            pnt = self.get_buffer(theta, phi, psi_beam, det, verbose)
            convolved_data += np.cos(angle_arg) * self.convolve(
                skyEB, beam_P, detector, pnt, det, verbose, pol=True
            )
            del (pnt,)
            # BE-convolution
            pnt = self.get_buffer(theta, phi, psi_beam, det, verbose)
            convolved_data += np.sin(angle_arg) * self.convolve(
                skyBE, beam_P, detector, pnt, det, verbose, pol=True
            )

        del skyEB, skyBE

        # Free per-detector pointing arrays before calibration
        del theta, phi, psi_det, psi_pol, psi_beam, hwp_angle

        self.calibrate_signal(
            data,
            det,
            beam_T,
            convolved_data,
            verbose,
        )
        self.save(data, det, convolved_data, verbose)

        del pnt, detector, beam_T, beam_P, skyT

        if verbose:
            timer.report_clear(f"conviqt process detector {det}")

    return

get_TEB_sky(skyfile, det, verbose)

Source code in toast/ops/conviqt.py
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
def get_TEB_sky(self, skyfile, det, verbose):
    """Load the T, EB and BE sky expansions for one detector.

    If `skyfile` exists, it is assumed to hold the T/E/B a_lm in HDUs
    1-3; the EB and BE combinations are assembled by writing temporary
    files that libconviqt can read.  Otherwise the pre-built component
    files ``*_T.fits``, ``*_EB.fits`` and ``*_BE.fits`` are loaded.

    Returns:
        (tuple): The (skyT, skyEB, skyBE) sky objects.
    """
    if not os.path.isfile(skyfile):
        # No combined TEB file; fall back to per-component files on disk
        fname_T = skyfile.replace(".fits", "_T.fits")
        fname_EB = skyfile.replace(".fits", "_EB.fits")
        fname_BE = skyfile.replace(".fits", "_BE.fits")
        for fname in fname_T, fname_EB, fname_BE:
            if not os.path.isfile(fname):
                msg = f"No TEB sky at {skyfile} and no component sky at {fname}"
                raise RuntimeError(msg)
        skyT = self.get_sky(fname_T, det, verbose, pol=False)
        skyEB = self.get_sky(fname_EB, det, verbose, pol=True)
        skyBE = self.get_sky(fname_BE, det, verbose, pol=True)
        return skyT, skyEB, skyBE

    skyT = self.get_sky(skyfile, det, verbose, pol=False)
    # generate temporary files to use libconviqt facilities
    slmE = hp.read_alm(skyfile, hdu=2)
    slmB = hp.read_alm(skyfile, hdu=3)
    zero = slmE * 0
    with tempfile.TemporaryDirectory() as tempdir:
        fname_temp = os.path.join(tempdir, "slm.fits")
        # EB: (0, E, B)
        hp.write_alm(
            fname_temp,
            np.vstack([zero, slmE, slmB]),
            lmax=self.lmax,
            overwrite=True,
        )
        skyEB = self.get_sky(fname_temp, det, verbose, pol=True)
        # BE: (0, B, -E)
        hp.write_alm(
            fname_temp,
            np.vstack([zero, slmB, -slmE]),
            lmax=self.lmax,
            overwrite=True,
        )
        skyBE = self.get_sky(fname_temp, det, verbose, pol=True)
    del slmE, slmB

    return skyT, skyEB, skyBE

get_TP_beam(beamfile, det, verbose)

Source code in toast/ops/conviqt.py
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
def get_TP_beam(self, beamfile, det, verbose):
    """Load the temperature and polarized beam expansions for one detector.

    If `beamfile` exists, it must hold the T/E/B beam a_lm in HDUs 1-3.
    The temperature beam is read directly and the polarized beam is
    assembled by writing the E/B expansions into a temporary file that
    libconviqt can parse.  Otherwise, the per-component files
    ``*_T.fits`` and ``*_P.fits`` are loaded instead.

    Args:
        beamfile (str): Beam a_lm file name (or base name of the
            component files).
        det (str): Detector name, used only in the timing report.
        verbose (bool): If True, report initialization time.

    Returns:
        (tuple): The (beamT, beamP) conviqt.Beam instances.

    Raises:
        RuntimeError: If the E and B expansions have different mmax.
    """
    timer = Timer()
    timer.start()
    if os.path.isfile(beamfile):
        beamT = conviqt.Beam(
            lmax=self.lmax,
            mmax=self.beammmax,
            pol=False,
            beamfile=beamfile,
            comm=self.comm,
        )
        # generate temporary files to use libconviqt facilities
        blmE, mmaxE = hp.read_alm(beamfile, hdu=2, return_mmax=True)
        blmB, mmaxB = hp.read_alm(beamfile, hdu=3, return_mmax=True)
        if mmaxE != mmaxB:
            # BUGFIX: the message previously misspelled "mmaxE" as "mmatE"
            msg = f"Mismatch: mmaxE={mmaxE}, mmaxB={mmaxB}"
            raise RuntimeError(msg)
        with tempfile.TemporaryDirectory() as tempdir:
            fname_temp = os.path.join(tempdir, "blm.fits")
            # Polarized beam: (0, E, B) with the T column zeroed
            hp.write_alm(
                fname_temp,
                np.vstack([blmE * 0, blmE, blmB]),
                lmax=self.lmax,
                mmax_in=mmaxE,
                overwrite=True,
            )
            beamP = conviqt.Beam(
                lmax=self.lmax,
                mmax=self.beammmax,
                pol=True,
                beamfile=fname_temp,
                comm=self.comm,
            )
        del blmE, blmB
    else:
        # Assume separate temperature and polarization files on disk
        beam_file_T = beamfile.replace(".fits", "_T.fits")
        beamT = conviqt.Beam(
            lmax=self.lmax,
            mmax=self.beammmax,
            pol=False,
            beamfile=beam_file_T,
            comm=self.comm,
        )
        beam_file_P = beamfile.replace(".fits", "_P.fits")
        beamP = conviqt.Beam(
            lmax=self.lmax,
            mmax=self.beammmax,
            pol=True,
            beamfile=beam_file_P,
            comm=self.comm,
        )

    if verbose:
        timer.report_clear(f"initialize beam for detector {det}")
    return beamT, beamP

toast.ops.SimWeightedConviqt

Bases: SimConviqt

Operator which uses libconviqt to generate beam-convolved timestreams. This operator should be used in the presence of a spinning HWP, which makes the beam time-dependent by constantly mapping the co- and cross-polar responses onto each other. In OpSimConviqt we assume the beam to be static.

Source code in toast/ops/conviqt.py
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
class SimWeightedConviqt(SimConviqt):
    """Operator which uses libconviqt to generate beam-convolved timestreams.
    This operator should be used in the presence of a spinning HWP, which
    makes the beam time-dependent by constantly mapping the co- and
    cross-polar responses onto each other.  In OpSimConviqt we assume the
    beam to be static.
    """

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Convolve each detector's sky with the I/Q/U-weighted beam
        components and accumulate the calibrated result into
        ``self.det_data``.  Requires MPI and libconviqt.
        """
        if not self.available:
            raise RuntimeError("libconviqt is not available")

        if self.comm is None:
            raise RuntimeError("libconviqt requires MPI")

        if self.detector_pointing is None:
            raise RuntimeError("detector_pointing cannot be None.")

        log = Logger.get()

        timer = Timer()
        timer.start()

        self.units = data.detector_units(self.det_data)
        if self.units is None:
            # This means that the data does not yet exist
            self.units = self.det_data_units

        # Expand detector pointing
        self.detector_pointing.apply(data, detectors=detectors)

        all_detectors = self._get_all_detectors(data, detectors)

        for det in all_detectors:
            # Only rank 0 reports timing, and only when requested
            verbose = self.comm.rank == 0 and self.verbosity > 0

            # Expand detector pointing
            self.detector_pointing.apply(data, detectors=[det])

            # Explicit per-detector overrides take precedence over patterns
            if det in self.sky_file_dict:
                sky_file = self.sky_file_dict[det]
            else:
                sky_file = self.sky_file.format(detector=det, mc=self.mc)
            sky = self.get_sky(sky_file, det, verbose)

            if det in self.beam_file_dict:
                beam_file = self.beam_file_dict[det]
            else:
                beam_file = self.beam_file.format(detector=det, mc=self.mc)

            beamI00, beam0I0, beam00I = self.get_beam(beam_file, det, verbose)

            detector = self.get_detector(det)

            theta, phi, psi_det, psi_pol, psi_beam, hwp_angle = self.get_pointing(
                data, det, verbose
            )

            # I-beam convolution
            pnt = self.get_buffer(theta, phi, psi_beam, det, verbose)
            convolved_data = self.convolve(sky, beamI00, detector, pnt, det, verbose)
            del pnt

            # The Q/U beam components are weighted by cos/sin of twice
            # the polarization angle.
            angle_arg = 2 * psi_pol

            # Q-beam convolution
            pnt = self.get_buffer(theta, phi, psi_beam, det, verbose)
            convolved_data += np.cos(angle_arg) * self.convolve(
                sky, beam0I0, detector, pnt, det, verbose
            )
            del pnt

            # U-beam convolution
            pnt = self.get_buffer(theta, phi, psi_beam, det, verbose)
            convolved_data += np.sin(angle_arg) * self.convolve(
                sky, beam00I, detector, pnt, det, verbose
            )
            del theta, phi, psi_det, psi_pol, psi_beam, hwp_angle

            self.calibrate_signal(
                data,
                det,
                beamI00,
                convolved_data,
                verbose,
            )
            self.save(data, det, convolved_data, verbose)

            del pnt, detector, beamI00, beam0I0, beam00I, sky

            if verbose:
                timer.report_clear(f"conviqt process detector {det}")

        return

    def get_beam(self, beamfile, det, verbose):
        """Load the three weighted beam components (I, Q-weight, U-weight)
        from per-component files derived from `beamfile`.
        """
        timer = Timer()
        timer.start()
        # Component beams share the base name with distinguishing suffixes
        beam_file_i00 = beamfile.replace(".fits", "_I000.fits")
        beam_file_0i0 = beamfile.replace(".fits", "_0I00.fits")
        beam_file_00i = beamfile.replace(".fits", "_00I0.fits")
        beami00 = conviqt.Beam(
            self.lmax, self.beammmax, self.pol, beam_file_i00, self.comm
        )
        beam0i0 = conviqt.Beam(
            self.lmax, self.beammmax, self.pol, beam_file_0i0, self.comm
        )
        beam00i = conviqt.Beam(
            self.lmax, self.beammmax, self.pol, beam_file_00i, self.comm
        )

        if verbose:
            timer.report_clear(f"initialize beam for detector {det}")
        return beami00, beam0i0, beam00i

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/conviqt.py
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Convolve each detector's sky with the I/Q/U-weighted beam
    components and accumulate the calibrated result into
    ``self.det_data``.

    Args:
        data (toast.Data): The distributed data.
        detectors (list): Optional list of detectors to process.

    Raises:
        RuntimeError: If libconviqt, MPI, or detector pointing is missing.
    """
    if not self.available:
        raise RuntimeError("libconviqt is not available")

    if self.comm is None:
        raise RuntimeError("libconviqt requires MPI")

    if self.detector_pointing is None:
        raise RuntimeError("detector_pointing cannot be None.")

    log = Logger.get()

    timer = Timer()
    timer.start()

    self.units = data.detector_units(self.det_data)
    if self.units is None:
        # This means that the data does not yet exist
        self.units = self.det_data_units

    # Expand detector pointing
    self.detector_pointing.apply(data, detectors=detectors)

    all_detectors = self._get_all_detectors(data, detectors)

    for det in all_detectors:
        # Only rank 0 reports timing, and only when requested
        verbose = self.comm.rank == 0 and self.verbosity > 0

        # Expand detector pointing
        self.detector_pointing.apply(data, detectors=[det])

        # Explicit per-detector overrides take precedence over patterns
        if det in self.sky_file_dict:
            sky_file = self.sky_file_dict[det]
        else:
            sky_file = self.sky_file.format(detector=det, mc=self.mc)
        sky = self.get_sky(sky_file, det, verbose)

        if det in self.beam_file_dict:
            beam_file = self.beam_file_dict[det]
        else:
            beam_file = self.beam_file.format(detector=det, mc=self.mc)

        beamI00, beam0I0, beam00I = self.get_beam(beam_file, det, verbose)

        detector = self.get_detector(det)

        theta, phi, psi_det, psi_pol, psi_beam, hwp_angle = self.get_pointing(
            data, det, verbose
        )

        # I-beam convolution
        pnt = self.get_buffer(theta, phi, psi_beam, det, verbose)
        convolved_data = self.convolve(sky, beamI00, detector, pnt, det, verbose)
        del pnt

        # Q/U components are weighted by cos/sin of twice the pol angle
        angle_arg = 2 * psi_pol

        # Q-beam convolution
        pnt = self.get_buffer(theta, phi, psi_beam, det, verbose)
        convolved_data += np.cos(angle_arg) * self.convolve(
            sky, beam0I0, detector, pnt, det, verbose
        )
        del pnt

        # U-beam convolution
        pnt = self.get_buffer(theta, phi, psi_beam, det, verbose)
        convolved_data += np.sin(angle_arg) * self.convolve(
            sky, beam00I, detector, pnt, det, verbose
        )
        del theta, phi, psi_det, psi_pol, psi_beam, hwp_angle

        self.calibrate_signal(
            data,
            det,
            beamI00,
            convolved_data,
            verbose,
        )
        self.save(data, det, convolved_data, verbose)

        del pnt, detector, beamI00, beam0I0, beam00I, sky

        if verbose:
            timer.report_clear(f"conviqt process detector {det}")

    return

get_beam(beamfile, det, verbose)

Source code in toast/ops/conviqt.py
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
def get_beam(self, beamfile, det, verbose):
    """Load the three weighted beam components for one detector.

    The component files are derived from `beamfile` by replacing the
    ``.fits`` suffix with ``_I000.fits``, ``_0I00.fits`` and
    ``_00I0.fits`` respectively.

    Returns:
        (tuple): The three conviqt.Beam instances (I, Q-weight, U-weight).
    """
    timer = Timer()
    timer.start()
    suffixes = ("_I000.fits", "_0I00.fits", "_00I0.fits")
    beams = [
        conviqt.Beam(
            self.lmax,
            self.beammmax,
            self.pol,
            beamfile.replace(".fits", suffix),
            self.comm,
        )
        for suffix in suffixes
    ]

    if verbose:
        timer.report_clear(f"initialize beam for detector {det}")
    return tuple(beams)

toast.ops.SimTotalconvolve

Bases: Operator

Operator which uses ducc0.totalconvolve to generate beam-convolved timestreams.

Source code in toast/ops/totalconvolve.py
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
@trait_docs
class SimTotalconvolve(Operator):
    """Operator which uses ducc0.totalconvolve to generate beam-convolved timestreams."""

    # Class traits
    #
    # Each trait carries its own help string; the comments below only
    # group related traits for readability.

    API = Int(0, help="Internal interface version for this operator")

    # --- Communication and pointing ---

    comm = Instance(
        klass=MPI_Comm,
        allow_none=True,
        help="MPI communicator to use for the convolution.",
    )

    detector_pointing = Instance(
        klass=Operator,
        allow_none=True,
        help="Operator that translates boresight pointing into detector frame",
    )

    # --- Flagging ---

    det_mask = Int(
        defaults.det_mask_invalid,
        help="Bit mask value for per-detector flagging",
    )

    det_flags = Unicode(
        defaults.det_flags,
        allow_none=True,
        help="Observation detdata key for flags to use",
    )

    det_flag_mask = Int(
        defaults.det_mask_invalid, help="Bit mask value for detector sample flagging"
    )

    shared_flags = Unicode(
        defaults.shared_flags,
        allow_none=True,
        help="Observation shared key for telescope flags to use",
    )

    shared_flag_mask = Int(
        defaults.shared_mask_invalid, help="Bit mask value for optional flagging"
    )

    # --- Output selection ---

    view = Unicode(
        None, allow_none=True, help="Use this view of the data in all observations"
    )

    det_data = Unicode(
        defaults.det_data,
        allow_none=False,
        help="Observation detdata key for accumulating convolved timestreams",
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    # --- Physics / simulation options ---

    calibrate = Bool(
        True,
        allow_none=False,
        help="Calibrate intensity to 1.0, rather than (1 + epsilon) / 2. "
        "Calibrate has no effect if the beam is found to be normalized rather than "
        "scaled with the leakage factor.",
    )

    dxx = Bool(
        True,
        allow_none=False,
        help="The beam frame is either Dxx or Pxx. Pxx includes the rotation to "
        "polarization sensitive basis, Dxx does not. When Dxx=True, detector "
        "orientation from attitude quaternions is corrected for the polarization "
        "angle.",
    )

    pol = Bool(
        True,
        allow_none=False,
        help="Toggle simulated signal polarization",
    )

    mc = Int(
        None,
        allow_none=True,
        help="Monte Carlo index used in synthesizing the input file names.",
    )

    @traitlets.validate("mc")
    def _check_mc(self, proposal):
        """Reject negative Monte Carlo indices (None is allowed)."""
        value = proposal["value"]
        if value is not None and value < 0:
            raise traitlets.TraitError("MC index cannot be negative")
        return value

    # --- Harmonic resolution and accuracy ---

    beammmax = Int(
        -1,
        allow_none=False,
        help="Beam maximum m.  Actual resolution in the Healpix FITS file may differ. "
        "If not set, will use the maximum expansion order from file.",
    )

    oversampling_factor = Float(
        1.8,
        allow_none=False,
        help="Oversampling factor for total convolution (useful range is 1.5-2.0)",
    )

    epsilon = Float(
        1e-5,
        allow_none=False,
        help="Relative accuracy of the interpolation step",
    )

    lmax = Int(
        -1,
        allow_none=False,
        help="Maximum ell (and m).  Actual resolution in the Healpix FITS file may "
        "differ.  If not set, will use the maximum expansion order from file.",
    )

    verbosity = Int(
        0,
        allow_none=False,
        help="",
    )

    # --- Preprocessing of the input expansions ---

    normalize_beam = Bool(
        False,
        allow_none=False,
        help="Normalize beam to have unit response to temperature monopole.",
    )

    remove_dipole = Bool(
        False,
        allow_none=False,
        help="Suppress the temperature dipole in sky_file.",
    )

    remove_monopole = Bool(
        False,
        allow_none=False,
        help="Suppress the temperature monopole in sky_file.",
    )

    apply_flags = Bool(
        False,
        allow_none=False,
        help="Only synthesize signal for unflagged samples.",
    )

    fwhm = Quantity(
        4.0 * u.arcmin,
        allow_none=False,
        help="Width of a symmetric gaussian beam already present in the skyfile "
        "(will be deconvolved away).",
    )

    # --- Input file selection (dicts supersede the patterns) ---

    sky_file_dict = Dict(
        None,
        allow_none=True,
        help="Dictionary of files containing the sky a_lm expansions. An entry for "
        "each detector name must be present. If provided, supersedes `sky_file`.",
    )

    sky_file = Unicode(
        None,
        allow_none=True,
        help="File containing the sky a_lm expansion.  Tag {detector} will be "
        "replaced with the detector name",
    )

    beam_file_dict = Dict(
        None,
        allow_none=True,
        help="Dictionary of files containing the beam a_lm expansions. An entry for "
        "each detector name must be present. If provided, supersedes `beam_file`.",
    )

    beam_file = Unicode(
        None,
        allow_none=True,
        help="File containing the beam a_lm expansion.  Tag {detector} will be "
        "replaced with the detector name.",
    )

    @traitlets.validate("det_mask")
    def _check_det_mask(self, proposal):
        """Ensure the detector mask is non-negative."""
        value = proposal["value"]
        if value < 0:
            raise traitlets.TraitError("Det mask should be a positive integer")
        return value

    @traitlets.validate("shared_flag_mask")
    def _check_shared_flag_mask(self, proposal):
        """Ensure the shared flag mask is non-negative."""
        value = proposal["value"]
        if value < 0:
            raise traitlets.TraitError("Shared flag mask should be a positive integer")
        return value

    @traitlets.validate("det_flag_mask")
    def _check_det_flag_mask(self, proposal):
        """Ensure the detector flag mask is non-negative."""
        value = proposal["value"]
        if value < 0:
            raise traitlets.TraitError("Det flag mask should be a positive integer")
        return value

    def __init__(self, **kwargs):
        """Construct the operator, passing all traits to the base class."""
        super().__init__(**kwargs)

    @property
    def available(self):
        """Return True if ducc0.totalconvolve is found in the library search path."""
        # `totalconvolve` is the module object imported at file scope
        # (None when the import failed), so this is a cheap probe.
        return totalconvolve is not None

    # NOTE(review): this trait is declared after the validators, unlike
    # the other traits at the top of the class.
    hwp_angle = Unicode(
        None, allow_none=True, help="Observation shared key for HWP angle"
    )

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Convolve the sky with per-detector beams using
        ducc0.totalconvolve and accumulate the calibrated result into
        ``self.det_data``.

        Args:
            data (toast.Data): The distributed data.
            detectors (list): Optional list of detectors to process.

        Raises:
            RuntimeError: If ducc0.totalconvolve or detector pointing is
                missing.
        """
        if not self.available:
            raise RuntimeError("ducc0.totalconvolve is not available")

        if self.detector_pointing is None:
            raise RuntimeError("detector_pointing cannot be None.")

        log = Logger.get()

        timer = Timer()
        timer.start()

        env = Environment.get()
        nthreads = env.max_threads()

        verbose = self.verbosity > 0
        if use_mpi:
            self.comm.barrier()
            # With MPI, only rank 0 reports
            verbose = verbose and self.comm.rank == 0
            if self.comm.size > 1 and self.comm.rank == 0:
                log.warning(
                    "communicator size>1: totalconvolve will work, "
                    "but will waste memory. To be fixed in future releases."
                )

        all_detectors = self._get_all_detectors(data, detectors)

        for det in all_detectors:
            # Expand detector pointing
            self.detector_pointing.apply(data, detectors=[det])

            # Explicit per-detector overrides take precedence over patterns
            if det in self.sky_file_dict:
                sky_file = self.sky_file_dict[det]
            else:
                sky_file = self.sky_file.format(detector=det, mc=self.mc)

            if det in self.beam_file_dict:
                beam_file = self.beam_file_dict[det]
            else:
                beam_file = self.beam_file.format(detector=det, mc=self.mc)

            # Resolution is negotiated between the files and the traits
            lmax, mmax = self.get_lmmax(sky_file, beam_file)
            sky = self.get_sky(sky_file, lmax, det, verbose)
            beam = self.get_beam(beam_file, lmax, mmax, det, verbose)

            theta, phi, psi, psi_pol = self.get_pointing(data, det, verbose)
            pnt = self.get_buffer(theta, phi, psi, det, verbose)
            del theta, phi, psi
            # Without an HWP, the convolution does not use psi_pol
            if self.hwp_angle is None:
                psi_pol = None

            convolved_data = self.convolve(
                sky, beam, lmax, mmax, pnt, psi_pol, det, nthreads, verbose
            )
            del psi_pol

            self.calibrate_signal(data, det, beam, convolved_data, verbose)
            self.save(data, det, convolved_data, verbose)

            del pnt, beam, sky

            if verbose:
                timer.report_clear(f"totalconvolve process detector {det}")

        return

    def _get_all_detectors(self, data, detectors):
        """Assemble a list of detectors across all processes and
        observations in `self._comm`.

        Also ensures the output detector data object exists in every
        observation.  With MPI the result is a sorted list shared by all
        ranks; without MPI it is the local set.
        """
        my_dets = set()
        for obs in data.obs:
            # Get the detectors we are using for this observation
            obs_dets = obs.select_local_detectors(detectors, flagmask=self.det_mask)
            for det in obs_dets:
                my_dets.add(det)
            # Make sure detector data output exists
            # NOTE(review): the boolean return is unused; `ensure` is
            # called here only for its side effect.
            exists = obs.detdata.ensure(
                self.det_data, detectors=detectors, create_units=self.det_data_units
            )
        if use_mpi:
            # Union the per-rank sets on rank 0, then broadcast
            all_dets = self.comm.gather(my_dets, root=0)
            if self.comm.rank == 0:
                for some_dets in all_dets:
                    my_dets.update(some_dets)
                my_dets = sorted(my_dets)
            all_dets = self.comm.bcast(my_dets, root=0)
        else:
            all_dets = my_dets
        return all_dets

    def _get_psi_pol(self, focalplane, det):
        """Parse polarization angle in radians from the focalplane
        dictionary.  The angle is relative to the Pxx basis.

        Args:
            focalplane: The focalplane table for one telescope.
            det (str): Detector name.

        Returns:
            (float): The polarization angle in radians.

        Raises:
            RuntimeError: If the detector or the angle column is missing.
        """
        if det not in focalplane:
            raise RuntimeError(f"focalplane does not include {det}")
        props = focalplane[det]
        if "psi_pol" in props.colnames:
            # BUGFIX: read the `psi_pol` column here.  The original read
            # `pol_angle`, which made this branch identical to the
            # deprecated fallback below and ignored the psi_pol values.
            psi_pol = props["psi_pol"].to_value(u.radian)
        elif "pol_angle" in props.colnames:
            warnings.warn(
                "Use psi_pol and psi_uv rather than pol_angle", DeprecationWarning
            )
            psi_pol = props["pol_angle"].to_value(u.radian)
        else:
            raise RuntimeError(f"focalplane[{det}] does not include psi_pol")
        return psi_pol

    def _get_psi_uv(self, focalplane, det):
        """Parse Pxx basis angle in radians from the focalplane
        dictionary.  The angle is measured from Dxx to Pxx basis.

        Args:
            focalplane: The focalplane table for one telescope.
            det (str): Detector name.

        Returns:
            (float): The basis angle in radians.

        Raises:
            RuntimeError: If the detector or the psi_uv column is missing.
        """
        if det not in focalplane:
            raise RuntimeError(f"focalplane does not include {det}")
        props = focalplane[det]
        # BUGFIX: the presence check used the key "psi_uv_deg" while the
        # value was read from "psi_uv", so a focalplane with only
        # "psi_uv_deg" raised a KeyError instead of the RuntimeError
        # below.  Test the same column that is actually read.
        if "psi_uv" in props.colnames:
            psi_uv = props["psi_uv"].to_value(u.radian)
        else:
            raise RuntimeError(f"focalplane[{det}] does not include psi_uv")
        return psi_uv

    def _get_epsilon(self, focalplane, det):
        """Parse polarization leakage (epsilon) from the focalplane
        object or dictionary.
        """
        if det not in focalplane:
            raise RuntimeError(f"focalplane does not include {det}")
        props = focalplane[det]
        if "pol_leakage" in props.colnames:
            epsilon = focalplane[det]["pol_leakage"]
        else:
            # Assume zero polarization leakage
            epsilon = 0
        return epsilon

    def get_lmmax(self, skyfile, beamfile):
        """Determine the actual lmax and beammmax to use for the convolution
        from class parameters and values in the files.

        Returns:
            (tuple): The (lmax, mmax) to use.
        """
        ncomp = 3 if self.pol else 1
        slmax, blmax, bmmax = -1, -1, -1
        for i in range(ncomp):
            # for sky and beam respectively, lmax is the max of all components
            alm_tmp, mmax_tmp = hp.fitsfunc.read_alm(
                skyfile, hdu=i + 1, return_mmax=True
            )
            # lmax is inferred from the a_lm vector length and its mmax
            lmax_tmp = hp.sphtfunc.Alm.getlmax(alm_tmp.shape[0], mmax_tmp)
            slmax = max(slmax, lmax_tmp)
            alm_tmp, mmax_tmp = hp.fitsfunc.read_alm(
                beamfile, hdu=i + 1, return_mmax=True
            )
            lmax_tmp = hp.sphtfunc.Alm.getlmax(alm_tmp.shape[0], mmax_tmp)
            blmax = max(blmax, lmax_tmp)
            # for the beam, determine also the largest mmax present
            bmmax = max(bmmax, mmax_tmp)
        # no need to go higher than the lower of the lmax from sky and beam
        lmax_out = min(slmax, blmax)
        mmax_out = bmmax
        # if parameters are lower than the detected values, reduce even further
        if self.lmax != -1:
            lmax_out = min(lmax_out, self.lmax)
        if self.beammmax != -1:
            mmax_out = min(mmax_out, self.beammmax)
        return lmax_out, mmax_out

    def load_alm(self, file, lmax, mmax):
        """Read a_lm components from `file` into a zero-padded array of
        shape (ncomp, Alm.getsize(lmax, mmax)), truncating each m-block
        to the requested lmax/mmax.
        """
        ncomp = 3 if self.pol else 1
        out = np.zeros(
            (ncomp, hp.sphtfunc.Alm.getsize(lmax, mmax)), dtype=np.complex128
        )
        for comp in range(ncomp):
            alm_in, mmax_in = hp.fitsfunc.read_alm(
                file, hdu=comp + 1, return_mmax=True
            )
            lmax_in = hp.sphtfunc.Alm.getlmax(alm_in.shape[0], mmax_in)
            # Copy m-block by m-block; coefficients beyond the input
            # resolution stay zero from the initialization above.
            lcommon = min(lmax, lmax_in)
            dst = src = 0
            for m in range(min(mmax, mmax_in) + 1):
                ncopy = lcommon - m + 1
                out[comp, dst : dst + ncopy] = alm_in[src : src + ncopy]
                dst += lmax - m + 1
                src += lmax_in - m + 1
        return out

    def get_sky(self, skyfile, lmax, det, verbose):
        """Load the sky a_lm for `det`, deconvolving any symmetric
        Gaussian beam already present in the file and optionally
        suppressing the temperature monopole and dipole.
        """
        timer = Timer()
        timer.start()
        sky = self.load_alm(skyfile, lmax, lmax)
        fwhm_rad = self.fwhm.to_value(u.radian)
        if fwhm_rad != 0:
            # Divide out the Gaussian window function, per component
            window = hp.sphtfunc.gauss_beam(fwhm_rad, lmax, pol=True)
            for comp in range(sky.shape[0]):
                hp.sphtfunc.almxfl(
                    sky[comp], 1.0 / window[:, comp], mmax=lmax, inplace=True
                )
        if self.remove_monopole:
            sky[0, 0] = 0
        if self.remove_dipole:
            # Zero a_{1,0} and a_{1,1} of the temperature expansion
            sky[0, 1] = 0
            sky[0, lmax + 1] = 0
        if verbose:
            timer.report_clear(f"initialize sky for detector {det}")
        return sky

    def get_beam(self, beamfile, lmax, mmax, det, verbose):
        """Load the beam a_lm for `det`, optionally normalizing the beam
        to unit response to the temperature monopole.
        """
        timer = Timer()
        timer.start()
        beam = self.load_alm(beamfile, lmax, mmax)
        if self.normalize_beam:
            # Scale so the b_00 coefficient equals 1 / (2 sqrt(pi))
            beam *= 1.0 / (2 * np.sqrt(np.pi) * beam[0, 0])
        if verbose:
            timer.report_clear(f"initialize beam for detector {det}")
        return beam

    def get_pointing(self, data, det, verbose):
        """Return the detector pointing as ZYZ Euler angles without the
        polarization sensitive angle.  These angles are to be compatible
        with Pxx or Dxx frame beam products

        Returns:
            (theta, phi, psi, psi_pol): flat arrays concatenated over all
            observations/views containing `det` (empty lists if none).
        """
        # We need the three pointing angles to describe the
        # pointing.  local_pointing() returns the attitude quaternions.
        # Flagged samples are replaced with this identity quaternion.
        nullquat = np.array([0, 0, 0, 1], dtype=np.float64)
        timer = Timer()
        timer.start()
        all_theta, all_phi, all_psi, all_psi_pol = [], [], [], []
        for obs in data.obs:
            # Skip observations that do not contain this detector locally
            if det not in obs.local_detectors:
                continue
            focalplane = obs.telescope.focalplane
            # Loop over views
            views = obs.view[self.view]
            for view in range(len(views)):
                # Get the flags if needed
                flags = None
                if self.apply_flags:
                    if self.shared_flags is not None:
                        flags = np.array(views.shared[self.shared_flags][view])
                        flags &= self.shared_flag_mask
                    if self.det_flags is not None:
                        detflags = np.array(views.detdata[self.det_flags][view][det])
                        detflags &= self.det_flag_mask
                        # Combine shared and per-detector flags
                        if flags is not None:
                            flags |= detflags
                        else:
                            flags = detflags

                # Timestream of detector quaternions
                quats = views.detdata[self.detector_pointing.quats][view][det]
                if verbose:
                    timer.report_clear(f"get detector pointing for {det}")

                if flags is not None:
                    # Copy before overwriting so the cached pointing is untouched
                    quats = quats.copy()
                    quats[flags != 0] = nullquat
                    if verbose:
                        timer.report_clear(f"initialize flags for detector {det}")

                theta, phi, psi = qa.to_iso_angles(quats)
                # Polarization angle in the Pxx basis
                psi_pol = self._get_psi_pol(focalplane, det)
                if self.dxx:
                    # Add angle between Dxx and Pxx
                    psi_pol += self._get_psi_uv(focalplane, det)
                # Remove the polarization angle from psi; keep it separately
                psi -= psi_pol
                psi_pol = np.ones(psi.size) * psi_pol
                if self.hwp_angle is not None:
                    # HWP rotation enters the polarization angle at twice
                    # the mechanical angle
                    hwp_angle = views.shared[self.hwp_angle][view]
                    psi_pol += 2 * hwp_angle
                all_theta.append(theta)
                all_phi.append(phi)
                all_psi.append(psi)
                all_psi_pol.append(psi_pol)

        # Concatenate across observations/views; note that when this
        # detector never appears, the empty Python lists are returned as-is.
        if len(all_theta) > 0:
            all_theta = np.hstack(all_theta)
            all_phi = np.hstack(all_phi)
            all_psi = np.hstack(all_psi)
            all_psi_pol = np.hstack(all_psi_pol)

        if verbose:
            timer.report_clear(f"compute pointing angles for detector {det}")
        return all_theta, all_phi, all_psi, all_psi_pol

    def get_buffer(self, theta, phi, psi, det, verbose):
        """Pack the pointing angles into an (nsample, 3) array for the
        convolver; columns are theta, phi, psi.
        """
        timer = Timer()
        timer.start()
        pnt = np.empty((len(theta), 3))
        pnt[:, 0] = theta
        pnt[:, 1] = phi
        pnt[:, 2] = psi
        # FIXME: not clear yet why this psi offset of pi is necessary
        pnt[:, 2] += np.pi
        if verbose:
            timer.report_clear(f"pack input array for detector {det}")
        return pnt

    # simple approach when there is only one task
    def conv_and_interpol_serial(
        self, skycomp, beamcomp, lmax, mmax, pnt, nthreads, t_conv, t_inter
    ):
        """Build the (psi, theta, phi) data cube from the sky/beam
        components and interpolate it at the requested pointings.
        Timers `t_conv` / `t_inter` accumulate the two phases.
        """
        t_conv.start()
        plan = totalconvolve.ConvolverPlan(
            lmax=lmax,
            kmax=mmax,
            sigma=self.oversampling_factor,
            epsilon=self.epsilon,
            nthreads=nthreads,
        )
        cube = np.zeros((plan.Npsi(), plan.Ntheta(), plan.Nphi()), dtype=np.float64)

        # Convolution: accumulate every (component, mbeam) plane
        for icomp in range(skycomp.shape[0]):
            slm = skycomp[icomp, :]
            blm = beamcomp[icomp, :]
            plan.getPlane(slm, blm, 0, cube[0:1])
            for mbeam in range(1, mmax + 1):
                plan.getPlane(slm, blm, mbeam, cube[2 * mbeam - 1 : 2 * mbeam + 1])

        plan.prepPsi(cube)
        t_conv.stop()

        # Interpolation at the detector pointings
        t_inter.start()
        res = np.empty(pnt.shape[0], dtype=np.float64)
        plan.interpol(cube, 0, 0, pnt[:, 0], pnt[:, 1], pnt[:, 2], res)
        t_inter.stop()
        return res

    # MPI version storing the full data cube at every MPI task (wasteful)
    # NOTE(review): this variant appears unused -- conv_and_interpol()
    # dispatches only to the serial or shared-memory implementations.
    def conv_and_interpol_mpi(
        self, skycomp, beamcomp, lmax, mmax, pnt, nthreads, t_conv, t_inter
    ):
        """Convolve with the plane computation distributed over the ranks
        of self.comm; every rank keeps a full copy of the data cube.
        """
        t_conv.start()
        plan = totalconvolve.ConvolverPlan(
            lmax=lmax,
            kmax=mmax,
            sigma=self.oversampling_factor,
            epsilon=self.epsilon,
            nthreads=nthreads,
        )
        myrank, nranks = self.comm.rank, self.comm.size
        cube = np.empty((plan.Npsi(), plan.Ntheta(), plan.Nphi()), dtype=np.float64)
        cube[()] = 0

        # convolution part
        # the work in this nested loop can be distributed over skycomp.shape[0]*(mmax+1) tasks
        for icomp in range(skycomp.shape[0]):
            if (icomp * (mmax + 1)) % nranks == myrank:
                plan.getPlane(skycomp[icomp, :], beamcomp[icomp, :], 0, cube[0:1])
            for mbeam in range(1, mmax + 1):
                if (icomp * (mmax + 1) + mbeam) % nranks == myrank:
                    plan.getPlane(
                        skycomp[icomp, :],
                        beamcomp[icomp, :],
                        mbeam,
                        cube[2 * mbeam - 1 : 2 * mbeam + 1],
                    )
        if nranks > 1:  # broadcast the results
            for icomp in range(skycomp.shape[0]):
                self.comm.Bcast(
                    [cube[0:1], MPI.DOUBLE], root=(icomp * (mmax + 1)) % nranks
                )
            # NOTE(review): this loop is OUTSIDE the icomp loop above, so
            # `icomp` here is the stale value from the last iteration and
            # only the last component's owners act as broadcast roots --
            # confirm this matches the work distribution of the
            # computation loop before relying on this code path.
            for mbeam in range(1, mmax + 1):
                self.comm.Bcast(
                    [cube[2 * mbeam - 1 : 2 * mbeam + 1], MPI.DOUBLE],
                    root=(icomp * (mmax + 1) + mbeam) % nranks,
                )

        plan.prepPsi(cube)
        t_conv.stop()
        t_inter.start()
        res = np.empty(pnt.shape[0], dtype=np.float64)
        plan.interpol(cube, 0, 0, pnt[:, 0], pnt[:, 1], pnt[:, 2], res)
        t_inter.stop()
        return res

    # MPI version with shared memory tricks, storing the full data cube only
    # once per node.
    def conv_and_interpol_mpi_shmem(
        self, skycomp, beamcomp, lmax, mmax, pnt, nthreads, t_conv, t_inter
    ):
        """Convolve with the data cube held in node-shared memory.

        The first rank on each node computes its share of the cube planes
        (using all the node's threads), the per-node masters exchange the
        results, and then every rank interpolates from the shared cube.
        """
        from pshmem import MPIShared

        t_conv.start()
        plan = totalconvolve.ConvolverPlan(
            lmax=lmax,
            kmax=mmax,
            sigma=self.oversampling_factor,
            epsilon=self.epsilon,
            nthreads=nthreads,
        )

        with MPIShared(
            (plan.Npsi(), plan.Ntheta(), plan.Nphi()), np.float64, self.comm
        ) as shm:
            cube = shm.data
            # Create a separate communicator on every node.
            intracomm = self.comm.Split_type(MPI.COMM_TYPE_SHARED)
            # Create a communicator with all master tasks of the intracomms;
            # on every other task, intercomm will be MPI.COMM_NULL.
            color = 0 if intracomm.rank == 0 else MPI.UNDEFINED
            intercomm = self.comm.Split(color)
            if intracomm.rank == 0:
                # We are on the master task of intracomm and all other tasks on
                # the node will be idle during the next computation step,
                # so we can hijack all their threads.
                nodeplan = totalconvolve.ConvolverPlan(
                    lmax=lmax,
                    kmax=mmax,
                    sigma=self.oversampling_factor,
                    epsilon=self.epsilon,
                    nthreads=intracomm.size * nthreads,
                )
                mynode, nnodes = intercomm.rank, intercomm.size
                cube[()] = 0.0

                # Convolution part
                # The skycomp.shape[0]*(mmax+1) work items in this nested loop
                # are distributed among the nodes in a round-robin fashion.
                for icomp in range(skycomp.shape[0]):
                    if (icomp * (mmax + 1)) % nnodes == mynode:
                        nodeplan.getPlane(
                            skycomp[icomp, :], beamcomp[icomp, :], 0, cube[0:1]
                        )
                    for mbeam in range(1, mmax + 1):
                        if (icomp * (mmax + 1) + mbeam) % nnodes == mynode:
                            nodeplan.getPlane(
                                skycomp[icomp, :],
                                beamcomp[icomp, :],
                                mbeam,
                                cube[2 * mbeam - 1 : 2 * mbeam + 1],
                            )
                if nnodes > 1:  # results must be broadcast to all nodes
                    for icomp in range(skycomp.shape[0]):
                        intercomm.Bcast(
                            [cube[0:1], MPI.DOUBLE], root=(icomp * (mmax + 1)) % nnodes
                        )
                    # NOTE(review): this loop is OUTSIDE the icomp loop above,
                    # so `icomp` here is the stale value from the last
                    # iteration -- only the last component's owners act as
                    # broadcast roots.  Confirm this matches the computation
                    # loop's work distribution before relying on this path.
                    for mbeam in range(1, mmax + 1):
                        intercomm.Bcast(
                            [cube[2 * mbeam - 1 : 2 * mbeam + 1], MPI.DOUBLE],
                            root=(icomp * (mmax + 1) + mbeam) % nnodes,
                        )
                nodeplan.prepPsi(cube)
                del nodeplan

            t_conv.stop()
            t_inter.start()
            # Interpolation part
            # No fancy communication is necessary here, since every task has
            # access to the full data cube.
            res = np.empty(pnt.shape[0], dtype=np.float64)
            plan.interpol(cube, 0, 0, pnt[:, 0], pnt[:, 1], pnt[:, 2], res)
            t_inter.stop()
            del plan
            del cube
        return res

    def conv_and_interpol(
        self, skycomp, beamcomp, lmax, mmax, pnt, nthreads, t_conv, t_inter
    ):
        """Dispatch to the serial or MPI shared-memory implementation,
        depending on whether MPI is in use with more than one rank.
        """
        serial = (not use_mpi) or (self.comm.size == 1)
        impl = (
            self.conv_and_interpol_serial
            if serial
            else self.conv_and_interpol_mpi_shmem
        )
        return impl(skycomp, beamcomp, lmax, mmax, pnt, nthreads, t_conv, t_inter)

    def convolve(self, sky, beam, lmax, mmax, pnt, psi_pol, det, nthreads, verbose):
        """Convolve the sky with the beam and interpolate at the pointings.

        Without a HWP a single TT(+EE+BB) convolution is performed; with a
        HWP the polarized terms are convolved separately and modulated by
        the 4 * psi_pol angle.
        """
        t_conv = Timer()
        t_inter = Timer()

        def run(slm, blm):
            # One convolution + interpolation pass sharing the timers
            return self.conv_and_interpol(
                slm, blm, lmax, mmax, pnt, nthreads, t_conv, t_inter
            )

        if self.hwp_angle is None:
            # simply compute TT+EE+BB
            convolved_data = run(np.array(sky), np.array(beam))
        else:
            # TT
            convolved_data = run(np.array([sky[0]]), np.array([beam[0]]))
            if self.pol:
                slm = np.array([sky[1], sky[2]])
                # EE+BB, modulated by cos(4 psi_pol)
                blm = np.array([beam[1], beam[2]])
                convolved_data += np.cos(4 * psi_pol) * run(slm, blm).reshape((-1,))
                # -EB+BE, modulated by sin(4 psi_pol)
                blm = np.array([-beam[2], beam[1]])
                convolved_data += np.sin(4 * psi_pol) * run(slm, blm).reshape((-1,))

        if verbose:
            t_conv.report_clear(f"convolve detector {det}")
            t_inter.report_clear(f"extract convolved data for {det}")

        convolved_data *= 0.5  # FIXME: not sure where this factor comes from
        return convolved_data

    def calibrate_signal(self, data, det, beam, convolved_data, verbose):
        """Rescale the convolved TOD for `det` in place.

        By default, libConviqt returns a signal that conforms to
        TOD = (1 + epsilon) / 2 * intensity + (1 - epsilon) / 2 * polarization.

        When calibrate = True, we rescale the TOD to
        TOD = intensity + (1 - epsilon) / (1 + epsilon) * polarization
        """
        if not self.calibrate:  # or beam.normalized():
            return

        timer = Timer()
        timer.start()
        offset = 0
        for obs in data.obs:
            if det not in obs.local_detectors:
                continue
            epsilon = self._get_epsilon(obs.telescope.focalplane, det)
            # Make sure detector data output exists
            obs.detdata.ensure(
                self.det_data, detectors=[det], create_units=self.det_data_units
            )
            # Rescale this detector's samples, one view at a time
            for view in obs.view[self.view].detdata[self.det_data]:
                nsample = len(view[det])
                convolved_data[offset : offset + nsample] *= 2 / (1 + epsilon)
                offset += nsample
        if verbose:
            timer.report_clear(f"calibrate detector {det}")
        return

    def save(self, data, det, convolved_data, verbose):
        """Accumulate the convolved timestream into the detector data."""
        timer = Timer()
        timer.start()
        offset = 0
        for obs in data.obs:
            if det not in obs.local_detectors:
                continue
            # Add this detector's samples, one view at a time
            for view in obs.view[self.view].detdata[self.det_data]:
                nsample = len(view[det])
                view[det] += convolved_data[offset : offset + nsample]
                offset += nsample
        if verbose:
            timer.report_clear(f"save detector {det}")
        return

    def _finalize(self, data, **kwargs):
        # Nothing to clean up; all work happens per-detector in _exec().
        return

    def _requires(self):
        req = self.detector_pointing.requires()
        req["global"].extend([self.pixel_dist, self.covariance])
        req["meta"].extend([self.noise_model])
        req["shared"] = [self.boresight]
        if self.shared_flags is not None:
            req["shared"].append(self.shared_flags)
        if self.det_flags is not None:
            req["detdata"].append(self.det_flags)
        if self.view is not None:
            req["intervals"].append(self.view)
        return req

    def _provides(self):
        """Return the data products created by this operator: whatever the
        detector pointing provides, plus the convolved detdata key."""
        prov = self.detector_pointing.provides()
        prov["detdata"].append(self.det_data)
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

apply_flags = Bool(False, allow_none=False, help='Only synthesize signal for unflagged samples.') class-attribute instance-attribute

available property

Return True if ducc0.totalconvolve is found in the library search path.

beam_file = Unicode(None, allow_none=True, help='File containing the beam a_lm expansion. Tag {detector} will be replaced with the detector name.') class-attribute instance-attribute

beam_file_dict = Dict(None, allow_none=True, help='Dictionary of files containing the beam a_lm expansions. An entry for each detector name must be present. If provided, supersedes `beam_file`.') class-attribute instance-attribute

beammmax = Int(-1, allow_none=False, help='Beam maximum m. Actual resolution in the Healpix FITS file may differ. If not set, will use the maximum expansion order from file.') class-attribute instance-attribute

calibrate = Bool(True, allow_none=False, help='Calibrate intensity to 1.0, rather than (1 + epsilon) / 2. Calibrate has no effect if the beam is found to be normalized rather than scaled with the leakage factor.') class-attribute instance-attribute

comm = Instance(klass=MPI_Comm, allow_none=True, help='MPI communicator to use for the convolution.') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, allow_none=False, help='Observation detdata key for accumulating convolved timestreams') class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

det_flag_mask = Int(defaults.det_mask_invalid, help='Bit mask value for detector sample flagging') class-attribute instance-attribute

det_flags = Unicode(defaults.det_flags, allow_none=True, help='Observation detdata key for flags to use') class-attribute instance-attribute

det_mask = Int(defaults.det_mask_invalid, help='Bit mask value for per-detector flagging') class-attribute instance-attribute

detector_pointing = Instance(klass=Operator, allow_none=True, help='Operator that translates boresight pointing into detector frame') class-attribute instance-attribute

dxx = Bool(True, allow_none=False, help='The beam frame is either Dxx or Pxx. Pxx includes the rotation to polarization sensitive basis, Dxx does not. When Dxx=True, detector orientation from attitude quaternions is corrected for the polarization angle.') class-attribute instance-attribute

epsilon = Float(1e-05, allow_none=False, help='Relative accuracy of the interpolation step') class-attribute instance-attribute

fwhm = Quantity(4.0 * u.arcmin, allow_none=False, help='Width of a symmetric gaussian beam already present in the skyfile (will be deconvolved away).') class-attribute instance-attribute

hwp_angle = Unicode(None, allow_none=True, help='Observation shared key for HWP angle') class-attribute instance-attribute

lmax = Int(-1, allow_none=False, help='Maximum ell (and m). Actual resolution in the Healpix FITS file may differ. If not set, will use the maximum expansion order from file.') class-attribute instance-attribute

mc = Int(None, allow_none=True, help='Monte Carlo index used in synthesizing the input file names.') class-attribute instance-attribute

normalize_beam = Bool(False, allow_none=False, help='Normalize beam to have unit response to temperature monopole.') class-attribute instance-attribute

oversampling_factor = Float(1.8, allow_none=False, help='Oversampling factor for total convolution (useful range is 1.5-2.0)') class-attribute instance-attribute

pol = Bool(True, allow_none=False, help='Toggle simulated signal polarization') class-attribute instance-attribute

remove_dipole = Bool(False, allow_none=False, help='Suppress the temperature dipole in sky_file.') class-attribute instance-attribute

remove_monopole = Bool(False, allow_none=False, help='Suppress the temperature monopole in sky_file.') class-attribute instance-attribute

shared_flag_mask = Int(defaults.shared_mask_invalid, help='Bit mask value for optional flagging') class-attribute instance-attribute

shared_flags = Unicode(defaults.shared_flags, allow_none=True, help='Observation shared key for telescope flags to use') class-attribute instance-attribute

sky_file = Unicode(None, allow_none=True, help='File containing the sky a_lm expansion. Tag {detector} will be replaced with the detector name') class-attribute instance-attribute

sky_file_dict = Dict(None, allow_none=True, help='Dictionary of files containing the sky a_lm expansions. An entry for each detector name must be present. If provided, supersedes `sky_file`.') class-attribute instance-attribute

verbosity = Int(0, allow_none=False, help='') class-attribute instance-attribute

view = Unicode(None, allow_none=True, help='Use this view of the data in all observations') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/totalconvolve.py
252
253
254
def __init__(self, **kwargs):
    super().__init__(**kwargs)
    return

_check_det_flag_mask(proposal)

Source code in toast/ops/totalconvolve.py
245
246
247
248
249
250
@traitlets.validate("det_flag_mask")
def _check_det_flag_mask(self, proposal):
    check = proposal["value"]
    if check < 0:
        raise traitlets.TraitError("Det flag mask should be a positive integer")
    return check

_check_det_mask(proposal)

Source code in toast/ops/totalconvolve.py
231
232
233
234
235
236
@traitlets.validate("det_mask")
def _check_det_mask(self, proposal):
    check = proposal["value"]
    if check < 0:
        raise traitlets.TraitError("Det mask should be a positive integer")
    return check

_check_mc(proposal)

Source code in toast/ops/totalconvolve.py
133
134
135
136
137
138
@traitlets.validate("mc")
def _check_mc(self, proposal):
    check = proposal["value"]
    if check is not None and check < 0:
        raise traitlets.TraitError("MC index cannot be negative")
    return check

_check_shared_flag_mask(proposal)

Source code in toast/ops/totalconvolve.py
238
239
240
241
242
243
@traitlets.validate("shared_flag_mask")
def _check_shared_flag_mask(self, proposal):
    check = proposal["value"]
    if check < 0:
        raise traitlets.TraitError("Shared flag mask should be a positive integer")
    return check

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/totalconvolve.py
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    if not self.available:
        raise RuntimeError("ducc0.totalconvolve is not available")

    if self.detector_pointing is None:
        raise RuntimeError("detector_pointing cannot be None.")

    log = Logger.get()

    timer = Timer()
    timer.start()

    env = Environment.get()
    nthreads = env.max_threads()

    verbose = self.verbosity > 0
    if use_mpi:
        self.comm.barrier()
        verbose = verbose and self.comm.rank == 0
        if self.comm.size > 1 and self.comm.rank == 0:
            log.warning(
                "communicator size>1: totalconvolve will work, "
                "but will waste memory. To be fixed in future releases."
            )

    all_detectors = self._get_all_detectors(data, detectors)

    for det in all_detectors:
        # Expand detector pointing
        self.detector_pointing.apply(data, detectors=[det])

        if det in self.sky_file_dict:
            sky_file = self.sky_file_dict[det]
        else:
            sky_file = self.sky_file.format(detector=det, mc=self.mc)

        if det in self.beam_file_dict:
            beam_file = self.beam_file_dict[det]
        else:
            beam_file = self.beam_file.format(detector=det, mc=self.mc)

        lmax, mmax = self.get_lmmax(sky_file, beam_file)
        sky = self.get_sky(sky_file, lmax, det, verbose)
        beam = self.get_beam(beam_file, lmax, mmax, det, verbose)

        theta, phi, psi, psi_pol = self.get_pointing(data, det, verbose)
        pnt = self.get_buffer(theta, phi, psi, det, verbose)
        del theta, phi, psi
        if self.hwp_angle is None:
            psi_pol = None

        convolved_data = self.convolve(
            sky, beam, lmax, mmax, pnt, psi_pol, det, nthreads, verbose
        )
        del psi_pol

        self.calibrate_signal(data, det, beam, convolved_data, verbose)
        self.save(data, det, convolved_data, verbose)

        del pnt, beam, sky

        if verbose:
            timer.report_clear(f"totalconvolve process detector {det}")

    return

_finalize(data, **kwargs)

Source code in toast/ops/totalconvolve.py
839
840
def _finalize(self, data, **kwargs):
    return

_get_all_detectors(data, detectors)

Assemble a list of detectors across all processes and observations in self._comm.

Source code in toast/ops/totalconvolve.py
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
def _get_all_detectors(self, data, detectors):
    """Assemble a list of detectors across all processes and
    observations in `self._comm`.
    """
    my_dets = set()
    for obs in data.obs:
        # Get the detectors we are using for this observation
        obs_dets = obs.select_local_detectors(detectors, flagmask=self.det_mask)
        for det in obs_dets:
            my_dets.add(det)
        # Make sure detector data output exists
        exists = obs.detdata.ensure(
            self.det_data, detectors=detectors, create_units=self.det_data_units
        )
    if use_mpi:
        all_dets = self.comm.gather(my_dets, root=0)
        if self.comm.rank == 0:
            for some_dets in all_dets:
                my_dets.update(some_dets)
            my_dets = sorted(my_dets)
        all_dets = self.comm.bcast(my_dets, root=0)
    else:
        all_dets = my_dets
    return all_dets

_get_epsilon(focalplane, det)

Parse polarization leakage (epsilon) from the focalplane object or dictionary.

Source code in toast/ops/totalconvolve.py
388
389
390
391
392
393
394
395
396
397
398
399
400
def _get_epsilon(self, focalplane, det):
    """Parse polarization leakage (epsilon) from the focalplane
    object or dictionary.
    """
    if det not in focalplane:
        raise RuntimeError(f"focalplane does not include {det}")
    props = focalplane[det]
    if "pol_leakage" in props.colnames:
        epsilon = focalplane[det]["pol_leakage"]
    else:
        # Assume zero polarization leakage
        epsilon = 0
    return epsilon

_get_psi_pol(focalplane, det)

Parse polarization angle in radians from the focalplane dictionary. The angle is relative to the Pxx basis.

Source code in toast/ops/totalconvolve.py
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
def _get_psi_pol(self, focalplane, det):
    """Parse polarization angle in radians from the focalplane
    dictionary.  The angle is relative to the Pxx basis.
    """
    if det not in focalplane:
        raise RuntimeError(f"focalplane does not include {det}")
    props = focalplane[det]
    if "psi_pol" in props.colnames:
        psi_pol = props["pol_angle"].to_value(u.radian)
    elif "pol_angle" in props.colnames:
        warnings.warn(
            "Use psi_pol and psi_uv rather than pol_angle", DeprecationWarning
        )
        psi_pol = props["pol_angle"].to_value(u.radian)
    else:
        raise RuntimeError(f"focalplane[{det}] does not include psi_pol")
    return psi_pol

_get_psi_uv(focalplane, det)

Parse Pxx basis angle in radians from the focalplane dictionary. The angle is measured from Dxx to Pxx basis.

Source code in toast/ops/totalconvolve.py
375
376
377
378
379
380
381
382
383
384
385
386
def _get_psi_uv(self, focalplane, det):
    """Parse Pxx basis angle in radians from the focalplane
    dictionary.  The angle is measured from Dxx to Pxx basis.
    """
    if det not in focalplane:
        raise RuntimeError(f"focalplane does not include {det}")
    props = focalplane[det]
    if "psi_uv_deg" in props.colnames:
        psi_uv = props["psi_uv"].to_value(u.radian)
    else:
        raise RuntimeError(f"focalplane[{det}] does not include psi_uv")
    return psi_uv

_provides()

Source code in toast/ops/totalconvolve.py
855
856
857
858
def _provides(self):
    prov = self.detector_pointing.provides()
    prov["detdata"].append(self.det_data)
    return prov

_requires()

Source code in toast/ops/totalconvolve.py
842
843
844
845
846
847
848
849
850
851
852
853
def _requires(self):
    req = self.detector_pointing.requires()
    req["global"].extend([self.pixel_dist, self.covariance])
    req["meta"].extend([self.noise_model])
    req["shared"] = [self.boresight]
    if self.shared_flags is not None:
        req["shared"].append(self.shared_flags)
    if self.det_flags is not None:
        req["detdata"].append(self.det_flags)
    if self.view is not None:
        req["intervals"].append(self.view)
    return req

calibrate_signal(data, det, beam, convolved_data, verbose)

By default, libConviqt returns a signal that conforms to TOD = (1 + epsilon) / 2 * intensity + (1 - epsilon) / 2 * polarization.

When calibrate = True, we rescale the TOD to TOD = intensity + (1 - epsilon) / (1 + epsilon) * polarization

Source code in toast/ops/totalconvolve.py
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
def calibrate_signal(self, data, det, beam, convolved_data, verbose):
    """By default, libConviqt results returns a signal that conforms to
    TOD = (1 + epsilon) / 2 * intensity + (1 - epsilon) / 2 * polarization.

    When calibrate = True, we rescale the TOD to
    TOD = intensity + (1 - epsilon) / (1 + epsilon) * polarization
    """
    if not self.calibrate:  # or beam.normalized():
        return

    timer = Timer()
    timer.start()
    offset = 0
    for obs in data.obs:
        if det not in obs.local_detectors:
            continue
        focalplane = obs.telescope.focalplane
        epsilon = self._get_epsilon(focalplane, det)
        # Make sure detector data output exists
        exists = obs.detdata.ensure(
            self.det_data, detectors=[det], create_units=self.det_data_units
        )
        # Loop over views
        views = obs.view[self.view]
        for view in views.detdata[self.det_data]:
            nsample = len(view[det])
            convolved_data[offset : offset + nsample] *= 2 / (1 + epsilon)
            offset += nsample
    if verbose:
        timer.report_clear(f"calibrate detector {det}")
    return

conv_and_interpol(skycomp, beamcomp, lmax, mmax, pnt, nthreads, t_conv, t_inter)

Source code in toast/ops/totalconvolve.py
729
730
731
732
733
734
735
736
737
738
739
def conv_and_interpol(
    self, skycomp, beamcomp, lmax, mmax, pnt, nthreads, t_conv, t_inter
):
    """Dispatch to the MPI shared-memory or serial implementation."""
    args = (skycomp, beamcomp, lmax, mmax, pnt, nthreads, t_conv, t_inter)
    # Use the distributed code path only when MPI is available and there
    # is more than one process in the communicator.
    if use_mpi and self.comm.size != 1:
        return self.conv_and_interpol_mpi_shmem(*args)
    return self.conv_and_interpol_serial(*args)

conv_and_interpol_mpi(skycomp, beamcomp, lmax, mmax, pnt, nthreads, t_conv, t_inter)

Source code in toast/ops/totalconvolve.py
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
def conv_and_interpol_mpi(
    self, skycomp, beamcomp, lmax, mmax, pnt, nthreads, t_conv, t_inter
):
    """Distributed convolution and interpolation over all MPI ranks.

    Each rank computes a round-robin subset of the (component, mbeam)
    planes of the data cube, the planes are broadcast from the rank that
    computed them, and every rank then interpolates its own pointing
    samples.  Timing is accumulated into ``t_conv`` / ``t_inter``.

    NOTE(review): ``conv_and_interpol`` dispatches only to the serial and
    shmem variants, so this method appears to be unused by that path.
    """
    t_conv.start()
    # Convolver plan sized for this lmax/mmax and the configured accuracy.
    plan = totalconvolve.ConvolverPlan(
        lmax=lmax,
        kmax=mmax,
        sigma=self.oversampling_factor,
        epsilon=self.epsilon,
        nthreads=nthreads,
    )
    myrank, nranks = self.comm.rank, self.comm.size
    cube = np.empty((plan.Npsi(), plan.Ntheta(), plan.Nphi()), dtype=np.float64)
    cube[()] = 0

    # convolution part
    # the work in this nested loop can be distributed over skycomp.shape[0]*(mmax+1) tasks
    for icomp in range(skycomp.shape[0]):
        if (icomp * (mmax + 1)) % nranks == myrank:
            plan.getPlane(skycomp[icomp, :], beamcomp[icomp, :], 0, cube[0:1])
        for mbeam in range(1, mmax + 1):
            if (icomp * (mmax + 1) + mbeam) % nranks == myrank:
                plan.getPlane(
                    skycomp[icomp, :],
                    beamcomp[icomp, :],
                    mbeam,
                    cube[2 * mbeam - 1 : 2 * mbeam + 1],
                )
    if nranks > 1:  # broadcast the results
        for icomp in range(skycomp.shape[0]):
            self.comm.Bcast(
                [cube[0:1], MPI.DOUBLE], root=(icomp * (mmax + 1)) % nranks
            )
        # NOTE(review): unlike the compute loop above, this mbeam loop is a
        # sibling of (not nested inside) the icomp loop, so the ``icomp``
        # used in the root computation is the final value left over from
        # the preceding loop.  Confirm whether this loop should be nested
        # per component, mirroring the computation above.
        for mbeam in range(1, mmax + 1):
            self.comm.Bcast(
                [cube[2 * mbeam - 1 : 2 * mbeam + 1], MPI.DOUBLE],
                root=(icomp * (mmax + 1) + mbeam) % nranks,
            )

    # Build the psi interpolation structure from the assembled cube.
    plan.prepPsi(cube)
    t_conv.stop()
    t_inter.start()
    # Interpolate the convolved cube onto the (theta, phi, psi) pointing.
    res = np.empty(pnt.shape[0], dtype=np.float64)
    plan.interpol(cube, 0, 0, pnt[:, 0], pnt[:, 1], pnt[:, 2], res)
    t_inter.stop()
    return res

conv_and_interpol_mpi_shmem(skycomp, beamcomp, lmax, mmax, pnt, nthreads, t_conv, t_inter)

Source code in toast/ops/totalconvolve.py
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
def conv_and_interpol_mpi_shmem(
    self, skycomp, beamcomp, lmax, mmax, pnt, nthreads, t_conv, t_inter
):
    """Distributed convolution and interpolation using MPI shared memory.

    The data cube is allocated once per node in shared memory.  The
    leader task on each node computes a round-robin subset of the
    (component, mbeam) planes using the threads of all tasks on the node,
    the node leaders exchange their planes, and then every task
    interpolates its own pointing samples directly from the shared cube.
    """
    from pshmem import MPIShared

    t_conv.start()
    # Per-task plan; used below only for the interpolation step.
    plan = totalconvolve.ConvolverPlan(
        lmax=lmax,
        kmax=mmax,
        sigma=self.oversampling_factor,
        epsilon=self.epsilon,
        nthreads=nthreads,
    )

    with MPIShared(
        (plan.Npsi(), plan.Ntheta(), plan.Nphi()), np.float64, self.comm
    ) as shm:
        cube = shm.data
        # Create a separate communicator on every node.
        intracomm = self.comm.Split_type(MPI.COMM_TYPE_SHARED)
        # Create a communicator with all master tasks of the intracomms;
        # on every other task, intercomm will be MPI.COMM_NULL.
        color = 0 if intracomm.rank == 0 else MPI.UNDEFINED
        intercomm = self.comm.Split(color)
        if intracomm.rank == 0:
            # We are on the master task of intracomm and all other tasks on
            # the node will be idle during the next computation step,
            # so we can hijack all their threads.
            nodeplan = totalconvolve.ConvolverPlan(
                lmax=lmax,
                kmax=mmax,
                sigma=self.oversampling_factor,
                epsilon=self.epsilon,
                nthreads=intracomm.size * nthreads,
            )
            mynode, nnodes = intercomm.rank, intercomm.size
            cube[()] = 0.0

            # Convolution part
            # The skycomp.shape[0]*(mmax+1) work items in this nested loop
            # are distributed among the nodes in a round-robin fashion.
            for icomp in range(skycomp.shape[0]):
                if (icomp * (mmax + 1)) % nnodes == mynode:
                    nodeplan.getPlane(
                        skycomp[icomp, :], beamcomp[icomp, :], 0, cube[0:1]
                    )
                for mbeam in range(1, mmax + 1):
                    if (icomp * (mmax + 1) + mbeam) % nnodes == mynode:
                        nodeplan.getPlane(
                            skycomp[icomp, :],
                            beamcomp[icomp, :],
                            mbeam,
                            cube[2 * mbeam - 1 : 2 * mbeam + 1],
                        )
            if nnodes > 1:  # results must be broadcast to all nodes
                for icomp in range(skycomp.shape[0]):
                    intercomm.Bcast(
                        [cube[0:1], MPI.DOUBLE], root=(icomp * (mmax + 1)) % nnodes
                    )
                # NOTE(review): unlike the compute loop above, this mbeam
                # loop is a sibling of (not nested inside) the icomp loop,
                # so ``icomp`` here is the final value from the loop that
                # just finished.  Confirm whether it should be nested per
                # component, mirroring the computation above.
                for mbeam in range(1, mmax + 1):
                    intercomm.Bcast(
                        [cube[2 * mbeam - 1 : 2 * mbeam + 1], MPI.DOUBLE],
                        root=(icomp * (mmax + 1) + mbeam) % nnodes,
                    )
            # Build the psi interpolation structure in the shared cube.
            nodeplan.prepPsi(cube)
            del nodeplan

        t_conv.stop()
        t_inter.start()
        # Interpolation part
        # No fancy communication is necessary here, since every task has
        # access to the full data cube.
        res = np.empty(pnt.shape[0], dtype=np.float64)
        plan.interpol(cube, 0, 0, pnt[:, 0], pnt[:, 1], pnt[:, 2], res)
        t_inter.stop()
        del plan
        del cube
    return res

conv_and_interpol_serial(skycomp, beamcomp, lmax, mmax, pnt, nthreads, t_conv, t_inter)

Source code in toast/ops/totalconvolve.py
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
def conv_and_interpol_serial(
    self, skycomp, beamcomp, lmax, mmax, pnt, nthreads, t_conv, t_inter
):
    """Convolve sky and beam harmonics and interpolate to the pointing,
    entirely within this process.
    """
    t_conv.start()
    plan = totalconvolve.ConvolverPlan(
        lmax=lmax,
        kmax=mmax,
        sigma=self.oversampling_factor,
        epsilon=self.epsilon,
        nthreads=nthreads,
    )
    cube = np.zeros((plan.Npsi(), plan.Ntheta(), plan.Nphi()), dtype=np.float64)

    # Convolution: accumulate the planes for every sky/beam component.
    for comp in range(skycomp.shape[0]):
        plan.getPlane(skycomp[comp, :], beamcomp[comp, :], 0, cube[0:1])
        for m in range(1, mmax + 1):
            lo = 2 * m - 1
            plan.getPlane(skycomp[comp, :], beamcomp[comp, :], m, cube[lo : lo + 2])

    plan.prepPsi(cube)
    t_conv.stop()

    # Interpolation onto the (theta, phi, psi) pointing samples.
    t_inter.start()
    result = np.empty(pnt.shape[0], dtype=np.float64)
    plan.interpol(cube, 0, 0, pnt[:, 0], pnt[:, 1], pnt[:, 2], result)
    t_inter.stop()
    return result

convolve(sky, beam, lmax, mmax, pnt, psi_pol, det, nthreads, verbose)

Source code in toast/ops/totalconvolve.py
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
def convolve(self, sky, beam, lmax, mmax, pnt, psi_pol, det, nthreads, verbose):
    """Convolve the sky with the beam and interpolate onto the pointing,
    handling the HWP-modulated polarized case separately."""
    t_conv = Timer()
    t_inter = Timer()

    def run(slm, blm):
        # Common conv_and_interpol invocation with shared parameters.
        return self.conv_and_interpol(
            slm, blm, lmax, mmax, pnt, nthreads, t_conv, t_inter
        )

    if self.hwp_angle is None:
        # simply compute TT+EE+BB
        convolved_data = run(np.array(sky), np.array(beam))
    else:
        # TT
        convolved_data = run(np.array([sky[0]]), np.array([beam[0]]))
        if self.pol:
            pol_sky = np.array([sky[1], sky[2]])
            # EE+BB
            ee_bb = run(pol_sky, np.array([beam[1], beam[2]])).reshape((-1,))
            convolved_data += np.cos(4 * psi_pol) * ee_bb
            # -EB+BE
            eb_be = run(pol_sky, np.array([-beam[2], beam[1]])).reshape((-1,))
            convolved_data += np.sin(4 * psi_pol) * eb_be

    if verbose:
        t_conv.report_clear(f"convolve detector {det}")
        t_inter.report_clear(f"extract convolved data for {det}")

    convolved_data *= 0.5  # FIXME: not sure where this factor comes from
    return convolved_data

get_beam(beamfile, lmax, mmax, det, verbose)

Source code in toast/ops/totalconvolve.py
474
475
476
477
478
479
480
481
482
def get_beam(self, beamfile, lmax, mmax, det, verbose):
    """Load the beam expansion, optionally normalizing its monopole term."""
    clock = Timer()
    clock.start()
    beam = self.load_alm(beamfile, lmax, mmax)
    if self.normalize_beam:
        beam *= 1.0 / (2 * np.sqrt(np.pi) * beam[0, 0])
    if verbose:
        clock.report_clear(f"initialize beam for detector {det}")
    return beam

get_buffer(theta, phi, psi, det, verbose)

Pack the pointing into the pointing array

Source code in toast/ops/totalconvolve.py
553
554
555
556
557
558
559
560
561
562
563
564
def get_buffer(self, theta, phi, psi, det, verbose):
    """Pack the pointing into the pointing array"""
    clock = Timer()
    clock.start()
    nsamp = len(theta)
    pnt = np.empty((nsamp, 3))
    pnt[:, 0] = theta
    pnt[:, 1] = phi
    pnt[:, 2] = psi
    # FIXME: not clear yet why this is necessary
    pnt[:, 2] += np.pi
    if verbose:
        clock.report_clear(f"pack input array for detector {det}")
    return pnt

get_lmmax(skyfile, beamfile)

Determine the actual lmax and beammmax to use for the convolution from class parameters and values in the files.

Source code in toast/ops/totalconvolve.py
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
def get_lmmax(self, skyfile, beamfile):
    """Determine the actual lmax and beammmax to use for the convolution
    from class parameters and values in the files.
    """
    ncomp = 3 if self.pol else 1

    def scan_hdu(fname, hdu):
        # Read one alm component and report its (lmax, mmax).
        alm, mm = hp.fitsfunc.read_alm(fname, hdu=hdu, return_mmax=True)
        return hp.sphtfunc.Alm.getlmax(alm.shape[0], mm), mm

    slmax = blmax = bmmax = -1
    for comp in range(ncomp):
        # for sky and beam respectively, lmax is the max of all components
        sky_lmax, _ = scan_hdu(skyfile, comp + 1)
        slmax = max(slmax, sky_lmax)
        beam_lmax, beam_mmax = scan_hdu(beamfile, comp + 1)
        blmax = max(blmax, beam_lmax)
        # for the beam, determine also the largest mmax present
        bmmax = max(bmmax, beam_mmax)

    # no need to go higher than the lower of the lmax from sky and beam
    lmax_out = min(slmax, blmax)
    mmax_out = bmmax
    # if parameters are lower than the detected values, reduce even further
    if self.lmax != -1:
        lmax_out = min(lmax_out, self.lmax)
    if self.beammmax != -1:
        mmax_out = min(mmax_out, self.beammmax)
    return lmax_out, mmax_out

get_pointing(data, det, verbose)

Return the detector pointing as ZYZ Euler angles without the polarization sensitive angle. These angles are to be compatible with Pxx or Dxx frame beam products

Source code in toast/ops/totalconvolve.py
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
def get_pointing(self, data, det, verbose):
    """Return the detector pointing as ZYZ Euler angles without the
    polarization sensitive angle.  These angles are to be compatible
    with Pxx or Dxx frame beam products
    """
    # We need the three pointing angles to describe the
    # pointing.  local_pointing() returns the attitude quaternions.
    nullquat = np.array([0, 0, 0, 1], dtype=np.float64)
    timer = Timer()
    timer.start()
    # Accumulate per-view angle arrays across all observations.
    all_theta, all_phi, all_psi, all_psi_pol = [], [], [], []
    for obs in data.obs:
        # Skip observations that do not hold this detector locally.
        if det not in obs.local_detectors:
            continue
        focalplane = obs.telescope.focalplane
        # Loop over views
        views = obs.view[self.view]
        for view in range(len(views)):
            # Get the flags if needed
            flags = None
            if self.apply_flags:
                # Combine shared (telescope-wide) and per-detector flags
                # into a single mask, masking each with its own bit mask.
                if self.shared_flags is not None:
                    flags = np.array(views.shared[self.shared_flags][view])
                    flags &= self.shared_flag_mask
                if self.det_flags is not None:
                    detflags = np.array(views.detdata[self.det_flags][view][det])
                    detflags &= self.det_flag_mask
                    if flags is not None:
                        flags |= detflags
                    else:
                        flags = detflags

            # Timestream of detector quaternions
            quats = views.detdata[self.detector_pointing.quats][view][det]
            if verbose:
                timer.report_clear(f"get detector pointing for {det}")

            if flags is not None:
                # Replace flagged samples with the placeholder quaternion.
                # Copy first so the stored pointing is not modified.
                quats = quats.copy()
                quats[flags != 0] = nullquat
                if verbose:
                    timer.report_clear(f"initialize flags for detector {det}")

            # Convert quaternions to ZYZ Euler (ISO) angles.
            theta, phi, psi = qa.to_iso_angles(quats)
            # Polarization angle in the Pxx basis
            psi_pol = self._get_psi_pol(focalplane, det)
            if self.dxx:
                # Add angle between Dxx and Pxx
                psi_pol += self._get_psi_uv(focalplane, det)
            # Remove the polarization-sensitive angle from psi; keep it
            # separately as a per-sample array.
            psi -= psi_pol
            psi_pol = np.ones(psi.size) * psi_pol
            if self.hwp_angle is not None:
                # Fold in twice the HWP angle for this view.
                hwp_angle = views.shared[self.hwp_angle][view]
                psi_pol += 2 * hwp_angle
            all_theta.append(theta)
            all_phi.append(phi)
            all_psi.append(psi)
            all_psi_pol.append(psi_pol)

    # Concatenate the per-view pieces into flat arrays.  If this process
    # saw no data for the detector, the empty lists are returned as-is.
    if len(all_theta) > 0:
        all_theta = np.hstack(all_theta)
        all_phi = np.hstack(all_phi)
        all_psi = np.hstack(all_psi)
        all_psi_pol = np.hstack(all_psi_pol)

    if verbose:
        timer.report_clear(f"compute pointing angles for detector {det}")
    return all_theta, all_phi, all_psi, all_psi_pol

get_sky(skyfile, lmax, det, verbose)

Source code in toast/ops/totalconvolve.py
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
def get_sky(self, skyfile, lmax, det, verbose):
    """Load the sky a_lm expansion, deconvolving an optional Gaussian
    beam and zeroing monopole/dipole terms when requested."""
    clock = Timer()
    clock.start()
    sky = self.load_alm(skyfile, lmax, lmax)
    fwhm_rad = self.fwhm.to_value(u.radian)
    if fwhm_rad != 0:
        # Deconvolve the Gaussian beam from each component.
        gauss = hp.sphtfunc.gauss_beam(fwhm_rad, lmax, pol=True)
        for comp, alm in enumerate(sky):
            sky[comp] = hp.sphtfunc.almxfl(
                alm, 1.0 / gauss[:, comp], mmax=lmax, inplace=True
            )
    if self.remove_monopole:
        sky[0, 0] = 0
    if self.remove_dipole:
        # Zero the (l=1, m=0) and (l=1, m=1) entries of the T component.
        sky[0, 1] = 0
        sky[0, lmax + 1] = 0
    if verbose:
        clock.report_clear(f"initialize sky for detector {det}")
    return sky

load_alm(file, lmax, mmax)

Source code in toast/ops/totalconvolve.py
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
def load_alm(self, file, lmax, mmax):
    """Read a_lm coefficients from a FITS file, repacked into an
    (ncomp, nalm) array laid out for the requested lmax/mmax."""
    ncomp = 3 if self.pol else 1
    out = np.zeros(
        (ncomp, hp.sphtfunc.Alm.getsize(lmax, mmax)), dtype=np.complex128
    )
    for comp in range(ncomp):
        alm_in, mmax_in = hp.fitsfunc.read_alm(
            file, hdu=comp + 1, return_mmax=True
        )
        lmax_in = hp.sphtfunc.Alm.getlmax(alm_in.shape[0], mmax_in)
        # Copy column by column in m, truncating to the smaller of the
        # requested and on-disk band limits.
        dst, src = 0, 0
        lcap = min(lmax, lmax_in)
        for m in range(min(mmax, mmax_in) + 1):
            ncopy = lcap - m + 1
            out[comp, dst : dst + ncopy] = alm_in[src : src + ncopy]
            dst += lmax - m + 1
            src += lmax_in - m + 1
    return out

save(data, det, convolved_data, verbose)

Store the convolved data.

Source code in toast/ops/totalconvolve.py
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
def save(self, data, det, convolved_data, verbose):
    """Store the convolved data."""
    clock = Timer()
    clock.start()
    cursor = 0
    for ob in data.obs:
        # Skip observations that do not hold this detector locally.
        if det not in ob.local_detectors:
            continue
        # Accumulate the matching sample range into each data view.
        for vw in ob.view[self.view].detdata[self.det_data]:
            nsamp = len(vw[det])
            vw[det] += convolved_data[cursor : cursor + nsamp]
            cursor += nsamp
    if verbose:
        clock.report_clear(f"save detector {det}")
    return

Scanning a Healpix Map

toast.ops.ScanHealpixMap

Bases: Operator

Operator which reads a HEALPix format map from disk and scans it to a timestream.

The map file is loaded and distributed among the processes. For each observation, the pointing model is used to expand the pointing and scan the map values into detector data.

Source code in toast/ops/scan_healpix.py
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
@trait_docs
class ScanHealpixMap(Operator):
    """Operator which reads a HEALPix format map from disk and scans it to a timestream.

    The map file is loaded and distributed among the processes.  For each observation,
    the pointing model is used to expand the pointing and scan the map values into
    detector data.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    file = Unicode(
        None,
        allow_none=True,
        help="Path to healpix FITS file.  Use ';' if providing multiple files",
    )

    det_data = Unicode(
        defaults.det_data,
        help="Observation detdata key for accumulating output.  Use ';' if different "
        "files are applied to different flavors",
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    det_mask = Int(
        defaults.det_mask_invalid,
        help="Bit mask value for per-detector flagging",
    )

    subtract = Bool(
        False, help="If True, subtract the map timestream instead of accumulating"
    )

    zero = Bool(False, help="If True, zero the data before accumulating / subtracting")

    pixel_dist = Unicode(
        "pixel_dist",
        help="The Data key where the PixelDistribution object is located",
    )

    pixel_pointing = Instance(
        klass=Operator,
        allow_none=True,
        help="This must be an instance of a pixel pointing operator",
    )

    stokes_weights = Instance(
        klass=Operator,
        allow_none=True,
        help="This must be an instance of a Stokes weights operator",
    )

    save_map = Bool(False, help="If True, do not delete map during finalize")

    save_pointing = Bool(
        False,
        help="If True, do not clear detector pointing matrices if we "
        "generate the pixel distribution",
    )

    @traitlets.validate("det_mask")
    def _check_det_mask(self, proposal):
        # The mask is a bit pattern, so it must be non-negative.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det mask should be a positive integer")
        return check

    @traitlets.validate("pixel_pointing")
    def _check_pixel_pointing(self, proposal):
        pixels = proposal["value"]
        if pixels is not None:
            if not isinstance(pixels, Operator):
                raise traitlets.TraitError(
                    "pixel_pointing should be an Operator instance"
                )
            # Check that this operator has the traits we expect
            for trt in ["pixels", "create_dist", "view"]:
                if not pixels.has_trait(trt):
                    msg = f"pixel_pointing operator should have a '{trt}' trait"
                    raise traitlets.TraitError(msg)
        return pixels

    @traitlets.validate("stokes_weights")
    def _check_stokes_weights(self, proposal):
        weights = proposal["value"]
        if weights is not None:
            if not isinstance(weights, Operator):
                raise traitlets.TraitError(
                    "stokes_weights should be an Operator instance"
                )
            # Check that this operator has the traits we expect
            for trt in ["weights", "view"]:
                if not weights.has_trait(trt):
                    msg = f"stokes_weights operator should have a '{trt}' trait"
                    raise traitlets.TraitError(msg)
        return weights

    def __init__(self, **kwargs):
        # Set before super().__init__() so the attribute always exists,
        # even if _exec() is never called.
        self.map_names = []
        super().__init__(**kwargs)

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Load the configured map(s) and scan them into detector data.

        Raises:
            RuntimeError: if `file` is unset, the key/file counts are
                inconsistent, the pixel distribution is invalid, the
                Stokes mode is unknown, or a map format is unrecognized.
        """
        log = Logger.get()

        # Check that the file is set
        if self.file is None:
            raise RuntimeError("You must set the file trait before calling exec()")

        # Split up the file and map names
        self.file_names = self.file.split(";")
        nmap = len(self.file_names)
        self.det_data_keys = self.det_data.split(";")
        nkey = len(self.det_data_keys)
        if nkey != 1 and (nmap != nkey):
            msg = "If multiple detdata keys are provided, each must have its own map"
            raise RuntimeError(msg)
        self.map_names = [f"{self.name}_map{i}" for i in range(nmap)]

        # Construct the pointing distribution if it does not already exist

        if self.pixel_dist not in data:
            pix_dist = BuildPixelDistribution(
                pixel_dist=self.pixel_dist,
                pixel_pointing=self.pixel_pointing,
                save_pointing=self.save_pointing,
            )
            pix_dist.apply(data)

        dist = data[self.pixel_dist]
        if not isinstance(dist, PixelDistribution):
            raise RuntimeError("The pixel_dist must be a PixelDistribution instance")

        # Use the pixel distribution and pointing configuration to allocate our
        # map data and read it in.
        nnz = None
        if self.stokes_weights is None or self.stokes_weights.mode == "I":
            nnz = 1
        elif self.stokes_weights.mode == "IQU":
            nnz = 3
        else:
            msg = f"Unknown Stokes weights mode '{self.stokes_weights.mode}'"
            raise RuntimeError(msg)

        # Create our map(s) to scan named after our own operator name.  Generally the
        # files on disk are stored as float32, but even if not there is no real benefit
        # to having higher precision to simulated map signal that is projected into
        # timestreams.

        for file_name, map_name in zip(self.file_names, self.map_names):
            if map_name not in data:
                data[map_name] = PixelData(
                    dist, dtype=np.float32, n_value=nnz, units=self.det_data_units
                )
                if filename_is_fits(file_name):
                    read_healpix_fits(
                        data[map_name], file_name, nest=self.pixel_pointing.nest
                    )
                elif filename_is_hdf5(file_name):
                    read_healpix_hdf5(
                        data[map_name], file_name, nest=self.pixel_pointing.nest
                    )
                else:
                    msg = f"Could not determine map format (HDF5 or FITS): {self.file}"
                    raise RuntimeError(msg)

        # The pipeline below will run one detector at a time in case we are computing
        # pointing.  Make sure that our full set of requested detector output exists.
        # FIXME:  This seems like a common pattern, maybe move to a "Create" operator?
        for ob in data.obs:
            # Get the detectors we are using for this observation
            dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
            if len(dets) == 0:
                # Nothing to do for this observation
                continue
            for key in self.det_data_keys:
                # If our output detector data does not yet exist, create it
                ob.detdata.ensure(
                    key, detectors=dets, create_units=self.det_data_units
                )

        # Configure the low-level map scanning operator.
        # NOTE(review): this accesses self.stokes_weights.weights although the
        # trait allows None (the nnz logic above handles that case) -- confirm
        # that callers always provide a Stokes weights operator.
        scanner = ScanMap(
            det_data=self.det_data_keys[0],
            det_data_units=self.det_data_units,
            det_mask=self.det_mask,
            pixels=self.pixel_pointing.pixels,
            weights=self.stokes_weights.weights,
            map_key=self.map_names[0],
            subtract=self.subtract,
            zero=self.zero,
        )

        # Build and run a pipeline that scans from our map
        scan_pipe = Pipeline(
            detector_sets=["SINGLE"],
            operators=[self.pixel_pointing, self.stokes_weights, scanner],
        )

        for imap, map_name in enumerate(self.map_names):
            if len(self.det_data_keys) == 1:
                det_data_key = self.det_data_keys[0]
            else:
                det_data_key = self.det_data_keys[imap]

            scanner.det_data = det_data_key
            scanner.map_key = map_name
            scan_pipe.apply(data, detectors=detectors, use_accel=False)

            # If we are accumulating on a single key, disable zeroing after first map
            if len(self.det_data_keys) == 1:
                scanner.zero = False

        # Clean up our map, if needed
        if not self.save_map:
            for map_name in self.map_names:
                data[map_name].clear()
                del data[map_name]
        return

    def _finalize(self, data, **kwargs):
        return

    def _requires(self):
        # We require whatever our pointing and weights operators require.
        req = self.pixel_pointing.requires()
        req.update(self.stokes_weights.requires())
        return req

    def _provides(self):
        prov = {"global": list(), "detdata": [self.det_data]}
        if self.save_map:
            prov["global"] = self.map_names
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, help="Observation detdata key for accumulating output. Use ';' if different files are applied to different flavors") class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

det_mask = Int(defaults.det_mask_invalid, help='Bit mask value for per-detector flagging') class-attribute instance-attribute

file = Unicode(None, allow_none=True, help="Path to healpix FITS file. Use ';' if providing multiple files") class-attribute instance-attribute

map_names = [] instance-attribute

pixel_dist = Unicode('pixel_dist', help='The Data key where the PixelDistribution object is located') class-attribute instance-attribute

pixel_pointing = Instance(klass=Operator, allow_none=True, help='This must be an instance of a pixel pointing operator') class-attribute instance-attribute

save_map = Bool(False, help='If True, do not delete map during finalize') class-attribute instance-attribute

save_pointing = Bool(False, help='If True, do not clear detector pointing matrices if we generate the pixel distribution') class-attribute instance-attribute

stokes_weights = Instance(klass=Operator, allow_none=True, help='This must be an instance of a Stokes weights operator') class-attribute instance-attribute

subtract = Bool(False, help='If True, subtract the map timestream instead of accumulating') class-attribute instance-attribute

zero = Bool(False, help='If True, zero the data before accumulating / subtracting') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/scan_healpix.py
129
130
131
def __init__(self, **kwargs):
    # Set before super().__init__() so the attribute always exists,
    # even if _exec() is never called.
    self.map_names = []
    super().__init__(**kwargs)

_check_det_mask(proposal)

Source code in toast/ops/scan_healpix.py
92
93
94
95
96
97
@traitlets.validate("det_mask")
def _check_det_mask(self, proposal):
    # The mask is a bit pattern, so it must be non-negative.
    value = proposal["value"]
    if value >= 0:
        return value
    raise traitlets.TraitError("Det mask should be a positive integer")

_check_pixel_pointing(proposal)

Source code in toast/ops/scan_healpix.py
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
@traitlets.validate("pixel_pointing")
def _check_pixel_pointing(self, proposal):
    op = proposal["value"]
    if op is None:
        return op
    if not isinstance(op, Operator):
        raise traitlets.TraitError(
            "pixel_pointing should be an Operator instance"
        )
    # Verify the traits we rely on later are present.
    for trait_name in ("pixels", "create_dist", "view"):
        if not op.has_trait(trait_name):
            raise traitlets.TraitError(
                f"pixel_pointing operator should have a '{trait_name}' trait"
            )
    return op

_check_stokes_weights(proposal)

Source code in toast/ops/scan_healpix.py
114
115
116
117
118
119
120
121
122
123
124
125
126
127
@traitlets.validate("stokes_weights")
def _check_stokes_weights(self, proposal):
    op = proposal["value"]
    if op is None:
        return op
    if not isinstance(op, Operator):
        raise traitlets.TraitError(
            "stokes_weights should be an Operator instance"
        )
    # Verify the traits we rely on later are present.
    for trait_name in ("weights", "view"):
        if not op.has_trait(trait_name):
            raise traitlets.TraitError(
                f"stokes_weights operator should have a '{trait_name}' trait"
            )
    return op

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/scan_healpix.py
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Read the HEALPix map(s) from disk and scan them into detector data.

    Args:
        data (Data):  The distributed data container.
        detectors (list):  Optional list of detectors to process.

    Raises:
        RuntimeError:  If the file trait is unset, the number of detdata
            keys is inconsistent with the number of files, the pixel
            distribution is not a PixelDistribution, the Stokes weights
            mode is unknown, or a map file format cannot be determined.
    """
    log = Logger.get()

    # Check that the file is set
    if self.file is None:
        raise RuntimeError("You must set the file trait before calling exec()")

    # Split up the file and map names.  Either a single detdata key receives
    # all maps (accumulated), or each map has its own key.
    self.file_names = self.file.split(";")
    nmap = len(self.file_names)
    self.det_data_keys = self.det_data.split(";")
    nkey = len(self.det_data_keys)
    if nkey != 1 and (nmap != nkey):
        msg = "If multiple detdata keys are provided, each must have its own map"
        raise RuntimeError(msg)
    self.map_names = [f"{self.name}_map{i}" for i in range(nmap)]

    # Construct the pointing distribution if it does not already exist

    if self.pixel_dist not in data:
        pix_dist = BuildPixelDistribution(
            pixel_dist=self.pixel_dist,
            pixel_pointing=self.pixel_pointing,
            save_pointing=self.save_pointing,
        )
        pix_dist.apply(data)

    dist = data[self.pixel_dist]
    if not isinstance(dist, PixelDistribution):
        raise RuntimeError("The pixel_dist must be a PixelDistribution instance")

    # Use the Stokes weights mode to determine the number of non-zero
    # elements (map columns) to allocate.
    nnz = None
    if self.stokes_weights is None or self.stokes_weights.mode == "I":
        nnz = 1
    elif self.stokes_weights.mode == "IQU":
        nnz = 3
    else:
        msg = f"Unknown Stokes weights mode '{self.stokes_weights.mode}'"
        raise RuntimeError(msg)

    # Create our map(s) to scan named after our own operator name.  Generally the
    # files on disk are stored as float32, but even if not there is no real benefit
    # to having higher precision to simulated map signal that is projected into
    # timestreams.

    for file_name, map_name in zip(self.file_names, self.map_names):
        if map_name not in data:
            data[map_name] = PixelData(
                dist, dtype=np.float32, n_value=nnz, units=self.det_data_units
            )
            if filename_is_fits(file_name):
                read_healpix_fits(
                    data[map_name], file_name, nest=self.pixel_pointing.nest
                )
            elif filename_is_hdf5(file_name):
                read_healpix_hdf5(
                    data[map_name], file_name, nest=self.pixel_pointing.nest
                )
            else:
                msg = f"Could not determine map format (HDF5 or FITS): {self.file}"
                raise RuntimeError(msg)

    # The pipeline below will run one detector at a time in case we are computing
    # pointing.  Make sure that our full set of requested detector output exists.
    # FIXME:  This seems like a common pattern, maybe move to a "Create" operator?
    for ob in data.obs:
        # Get the detectors we are using for this observation
        dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
        if len(dets) == 0:
            # Nothing to do for this observation
            continue
        for key in self.det_data_keys:
            # If our output detector data does not yet exist, create it
            exists_data = ob.detdata.ensure(
                key, detectors=dets, create_units=self.det_data_units
            )

    # Configure the low-level map scanning operator
    # NOTE(review): self.stokes_weights may be None (handled above when
    # computing nnz), but the attribute access below would then raise —
    # confirm callers always set stokes_weights before exec.
    scanner = ScanMap(
        det_data=self.det_data_keys[0],
        det_data_units=self.det_data_units,
        det_mask=self.det_mask,
        pixels=self.pixel_pointing.pixels,
        weights=self.stokes_weights.weights,
        map_key=self.map_names[0],
        subtract=self.subtract,
        zero=self.zero,
    )

    # Build and run a pipeline that scans from our map
    scan_pipe = Pipeline(
        detector_sets=["SINGLE"],
        operators=[self.pixel_pointing, self.stokes_weights, scanner],
    )

    for imap, map_name in enumerate(self.map_names):
        if len(self.det_data_keys) == 1:
            det_data_key = self.det_data_keys[0]
        else:
            det_data_key = self.det_data_keys[imap]

        scanner.det_data = det_data_key
        scanner.map_key = map_name
        scan_pipe.apply(data, detectors=detectors, use_accel=False)

        # If we are accumulating on a single key, disable zeroing after first map
        if len(self.det_data_keys) == 1:
            scanner.zero = False

    # Clean up our map, if needed
    if not self.save_map:
        for map_name in self.map_names:
            data[map_name].clear()
            del data[map_name]
    return

_finalize(data, **kwargs)

Source code in toast/ops/scan_healpix.py
256
257
def _finalize(self, data, **kwargs):
    # Nothing to finalize here; cleanup of the scanned maps happens at the
    # end of _exec (controlled by the save_map trait).
    return

_provides()

Source code in toast/ops/scan_healpix.py
264
265
266
267
268
def _provides(self):
    """Report the data products this operator creates.

    Always provides the detector data; the loaded maps are provided as
    global objects only when save_map is True.
    """
    provided_globals = self.map_names if self.save_map else list()
    return {"global": provided_globals, "detdata": [self.det_data]}

_requires()

Source code in toast/ops/scan_healpix.py
259
260
261
262
def _requires(self):
    """Report the data products required by this operator.

    These are the combined requirements of the pixel pointing and Stokes
    weights operators.
    """
    needed = self.pixel_pointing.requires()
    needed.update(self.stokes_weights.requires())
    return needed

toast.ops.ScanHealpixMask

Bases: Operator

Operator which reads a HEALPix format mask from disk and scans it to a timestream.

The mask file is loaded and distributed among the processes. For each observation, the pointing model is used to expand the pointing and scan the mask values into detector data.

Source code in toast/ops/scan_healpix.py
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
@trait_docs
class ScanHealpixMask(Operator):
    """Operator which reads a HEALPix format mask from disk and scans it to a timestream.

    The mask file is loaded and distributed among the processes.  For each observation,
    the pointing model is used to expand the pointing and scan the mask values into
    detector data.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    file = Unicode(None, allow_none=True, help="Path to healpix FITS file")

    det_flags = Unicode(
        defaults.det_flags,
        allow_none=True,
        help="Observation detdata key for flags to use",
    )

    det_flags_value = Int(
        defaults.det_mask_processing,
        help="The detector flag value to set where the mask result is non-zero",
    )

    det_mask = Int(
        defaults.det_mask_invalid,
        help="Bit mask value for per-detector flagging",
    )

    mask_bits = Int(
        255, help="The number to bitwise-and with each mask value to form the result"
    )

    pixel_dist = Unicode(
        "pixel_dist",
        help="The Data key where the PixelDistribution object is located",
    )

    pixel_pointing = Instance(
        klass=Operator,
        allow_none=True,
        help="This must be an instance of a pixel pointing operator",
    )

    save_mask = Bool(False, help="If True, do not delete mask during finalize")

    save_pointing = Bool(
        False,
        help="If True, do not clear detector pointing matrices if we "
        "generate the pixel distribution",
    )

    @traitlets.validate("det_mask")
    def _check_det_mask(self, proposal):
        # Bit masks must be non-negative.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det mask should be a positive integer")
        return check

    @traitlets.validate("pixel_pointing")
    def _check_pixel_pointing(self, proposal):
        # Value may be None, otherwise it must be an Operator with the
        # traits we use below.
        pixels = proposal["value"]
        if pixels is not None:
            if not isinstance(pixels, Operator):
                raise traitlets.TraitError(
                    "pixel_pointing should be an Operator instance"
                )
            # Check that this operator has the traits we expect
            for trt in ["pixels", "create_dist", "view"]:
                if not pixels.has_trait(trt):
                    msg = f"pixel_pointing operator should have a '{trt}' trait"
                    raise traitlets.TraitError(msg)
        return pixels

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Data key used to store the loaded mask, derived from our name.
        self.mask_name = f"{self.name}_mask"

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Read the mask from disk and scan it into detector flags.

        Raises:
            RuntimeError:  If the file trait is unset, the pixel
                distribution is invalid, or the mask file format cannot
                be determined.
        """
        log = Logger.get()

        # Check that the file is set
        if self.file is None:
            raise RuntimeError("You must set the file trait before calling exec()")

        # Construct the pointing distribution if it does not already exist

        if self.pixel_dist not in data:
            pix_dist = BuildPixelDistribution(
                pixel_dist=self.pixel_dist,
                pixel_pointing=self.pixel_pointing,
                save_pointing=self.save_pointing,
            )
            pix_dist.apply(data)

        dist = data[self.pixel_dist]
        if not isinstance(dist, PixelDistribution):
            raise RuntimeError("The pixel_dist must be a PixelDistribution instance")

        # Create our mask to scan, named after our own operator name.  The
        # mask values are stored as uint8 bit patterns.
        if self.mask_name not in data:
            data[self.mask_name] = PixelData(dist, dtype=np.uint8, n_value=1)
            if filename_is_fits(self.file):
                read_healpix_fits(
                    data[self.mask_name], self.file, nest=self.pixel_pointing.nest
                )
            elif filename_is_hdf5(self.file):
                read_healpix_hdf5(
                    data[self.mask_name], self.file, nest=self.pixel_pointing.nest
                )
            else:
                msg = f"Could not determine mask format (HDF5 or FITS): {self.file}"
                raise RuntimeError(msg)

        # The pipeline below will run one detector at a time in case we are computing
        # pointing.  Make sure that our full set of requested detector output exists.
        # FIXME:  This seems like a common pattern, maybe move to a "Create" operator?
        for ob in data.obs:
            # Get the detectors we are using for this observation
            dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
            if len(dets) == 0:
                # Nothing to do for this observation
                continue
            # If our output detector data does not yet exist, create it
            exists_flags = ob.detdata.ensure(
                self.det_flags, dtype=np.uint8, detectors=dets
            )

        # Configure the low-level mask scanning operator

        scanner = ScanMask(
            det_flags=self.det_flags,
            det_flags_value=self.det_flags_value,
            det_mask=self.det_mask,
            pixels=self.pixel_pointing.pixels,
            mask_key=self.mask_name,
            mask_bits=self.mask_bits,
        )

        # Build and run a pipeline that scans from our mask
        scan_pipe = Pipeline(
            detector_sets=["SINGLE"],
            operators=[self.pixel_pointing, scanner],
        )
        scan_pipe.apply(data, detectors=detectors)

        return

    def _finalize(self, data, **kwargs):
        # Clean up our mask, if needed
        if not self.save_mask:
            data[self.mask_name].clear()
            del data[self.mask_name]
        return

    def _requires(self):
        # BUGFIX:  This operator has no stokes_weights trait (masks need
        # only pixel pointing), so only the pixel pointing requirements apply.
        req = self.pixel_pointing.requires()
        return req

    def _provides(self):
        # BUGFIX:  This operator writes detector flags (det_flags), not
        # det_data, and keeps the mask (mask_name) only when save_mask is set.
        prov = {"global": list(), "detdata": [self.det_flags]}
        if self.save_mask:
            prov["global"] = [self.mask_name]
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

det_flags = Unicode(defaults.det_flags, allow_none=True, help='Observation detdata key for flags to use') class-attribute instance-attribute

det_flags_value = Int(defaults.det_mask_processing, help='The detector flag value to set where the mask result is non-zero') class-attribute instance-attribute

det_mask = Int(defaults.det_mask_invalid, help='Bit mask value for per-detector flagging') class-attribute instance-attribute

file = Unicode(None, allow_none=True, help='Path to healpix FITS file') class-attribute instance-attribute

mask_bits = Int(255, help='The number to bitwise-and with each mask value to form the result') class-attribute instance-attribute

mask_name = f'{self.name}_mask' instance-attribute

pixel_dist = Unicode('pixel_dist', help='The Data key where the PixelDistribution object is located') class-attribute instance-attribute

pixel_pointing = Instance(klass=Operator, allow_none=True, help='This must be an instance of a pixel pointing operator') class-attribute instance-attribute

save_mask = Bool(False, help='If True, do not delete mask during finalize') class-attribute instance-attribute

save_pointing = Bool(False, help='If True, do not clear detector pointing matrices if we generate the pixel distribution') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/scan_healpix.py
348
349
350
def __init__(self, **kwargs):
    """Initialize traits and derive the Data key used to store the mask."""
    super().__init__(**kwargs)
    self.mask_name = "{}_mask".format(self.name)

_check_det_mask(proposal)

Source code in toast/ops/scan_healpix.py
326
327
328
329
330
331
@traitlets.validate("det_mask")
def _check_det_mask(self, proposal):
    """Reject negative values for the per-detector flag bit mask."""
    value = proposal["value"]
    if value < 0:
        raise traitlets.TraitError("Det mask should be a positive integer")
    return value

_check_pixel_pointing(proposal)

Source code in toast/ops/scan_healpix.py
333
334
335
336
337
338
339
340
341
342
343
344
345
346
@traitlets.validate("pixel_pointing")
def _check_pixel_pointing(self, proposal):
    """Validate a proposed pixel_pointing value.

    None is accepted; otherwise the value must be an Operator that exposes
    the 'pixels', 'create_dist' and 'view' traits.
    """
    candidate = proposal["value"]
    if candidate is None:
        return candidate
    if not isinstance(candidate, Operator):
        raise traitlets.TraitError(
            "pixel_pointing should be an Operator instance"
        )
    # Verify that the expected trait names are present on the operator
    for required in ("pixels", "create_dist", "view"):
        if not candidate.has_trait(required):
            msg = f"pixel_pointing operator should have a '{required}' trait"
            raise traitlets.TraitError(msg)
    return candidate

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/scan_healpix.py
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Read the HEALPix mask from disk and scan it into detector flags.

    Args:
        data (Data):  The distributed data container.
        detectors (list):  Optional list of detectors to process.

    Raises:
        RuntimeError:  If the file trait is unset, the pixel distribution
            is not a PixelDistribution, or the mask file format cannot be
            determined.
    """
    log = Logger.get()

    # Check that the file is set
    if self.file is None:
        raise RuntimeError("You must set the file trait before calling exec()")

    # Construct the pointing distribution if it does not already exist

    if self.pixel_dist not in data:
        pix_dist = BuildPixelDistribution(
            pixel_dist=self.pixel_dist,
            pixel_pointing=self.pixel_pointing,
            save_pointing=self.save_pointing,
        )
        pix_dist.apply(data)

    dist = data[self.pixel_dist]
    if not isinstance(dist, PixelDistribution):
        raise RuntimeError("The pixel_dist must be a PixelDistribution instance")

    # Create our map to scan named after our own operator name.  Generally the
    # files on disk are stored as float32, but even if not there is no real benefit
    # to having higher precision to simulated map signal that is projected into
    # timestreams.
    # NOTE(review): unlike the map case, the mask is read into uint8 pixels.
    if self.mask_name not in data:
        data[self.mask_name] = PixelData(dist, dtype=np.uint8, n_value=1)
        if filename_is_fits(self.file):
            read_healpix_fits(
                data[self.mask_name], self.file, nest=self.pixel_pointing.nest
            )
        elif filename_is_hdf5(self.file):
            read_healpix_hdf5(
                data[self.mask_name], self.file, nest=self.pixel_pointing.nest
            )
        else:
            msg = f"Could not determine mask format (HDF5 or FITS): {self.file}"
            raise RuntimeError(msg)

    # The pipeline below will run one detector at a time in case we are computing
    # pointing.  Make sure that our full set of requested detector output exists.
    # FIXME:  This seems like a common pattern, maybe move to a "Create" operator?
    for ob in data.obs:
        # Get the detectors we are using for this observation
        dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
        if len(dets) == 0:
            # Nothing to do for this observation
            continue
        # If our output detector data does not yet exist, create it
        exists_flags = ob.detdata.ensure(
            self.det_flags, dtype=np.uint8, detectors=dets
        )

    # Configure the low-level map scanning operator

    scanner = ScanMask(
        det_flags=self.det_flags,
        det_flags_value=self.det_flags_value,
        det_mask=self.det_mask,
        pixels=self.pixel_pointing.pixels,
        mask_key=self.mask_name,
        mask_bits=self.mask_bits,
    )

    # Build and run a pipeline that scans from our map
    scan_pipe = Pipeline(
        detector_sets=["SINGLE"],
        operators=[self.pixel_pointing, scanner],
    )
    scan_pipe.apply(data, detectors=detectors)

    return

_finalize(data, **kwargs)

Source code in toast/ops/scan_healpix.py
426
427
428
429
430
431
def _finalize(self, data, **kwargs):
    """Delete the loaded mask object unless save_mask was requested."""
    if self.save_mask:
        return
    data[self.mask_name].clear()
    del data[self.mask_name]
    return

_provides()

Source code in toast/ops/scan_healpix.py
438
439
440
441
442
def _provides(self):
    """Report the data products this operator creates.

    BUGFIX:  This operator writes detector flags (det_flags), not det_data,
    and keeps the mask (mask_name) only when save_mask is set.  The previous
    code referenced det_data / save_map / map_name, none of which exist on
    ScanHealpixMask, raising AttributeError when called.
    """
    prov = {"global": list(), "detdata": [self.det_flags]}
    if self.save_mask:
        prov["global"] = [self.mask_name]
    return prov

_requires()

Source code in toast/ops/scan_healpix.py
433
434
435
436
def _requires(self):
    """Report the data products required by this operator.

    BUGFIX:  ScanHealpixMask has no stokes_weights trait (mask scanning
    needs only pixel pointing), so the previous call to
    self.stokes_weights.requires() raised AttributeError.
    """
    req = self.pixel_pointing.requires()
    return req

toast.ops.InterpolateHealpixMap

Bases: Operator

Operator which reads a HEALPix format map from disk and interpolates it to a timestream.

The map file is loaded and placed in shared memory on every participating node. For each observation, the pointing model is used to expand the pointing and bilinearly interpolate the map values into detector data.

Source code in toast/ops/interpolate_healpix.py
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
@trait_docs
class InterpolateHealpixMap(Operator):
    """Operator which reads a HEALPix format map from disk and
    interpolates it to a timestream.

    The map file is loaded and placed in shared memory on every
    participating node.  For each observation, the pointing model is
    used to expand the pointing and bilinearly interpolate the map
    values into detector data.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    file = Unicode(
        None,
        allow_none=True,
        help="Path to healpix FITS file.  Use ';' if providing multiple files",
    )

    det_data = Unicode(
        defaults.det_data,
        help="Observation detdata key for accumulating output.  Use ';' if different "
        "files are applied to different flavors",
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    det_mask = Int(
        defaults.det_mask_invalid,
        help="Bit mask value for per-detector flagging",
    )

    subtract = Bool(
        False, help="If True, subtract the map timestream instead of accumulating"
    )

    zero = Bool(False, help="If True, zero the data before accumulating / subtracting")

    detector_pointing = Instance(
        klass=Operator,
        allow_none=True,
        help="Operator that translates boresight pointing into detector frame",
    )

    stokes_weights = Instance(
        klass=Operator,
        allow_none=True,
        help="This must be an instance of a Stokes weights operator",
    )

    save_map = Bool(False, help="If True, do not delete map during finalize")

    @traitlets.validate("det_mask")
    def _check_det_mask(self, proposal):
        # Bit masks must be non-negative.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det mask should be a positive integer")
        return check

    @traitlets.validate("detector_pointing")
    def _check_detector_pointing(self, proposal):
        # Value may be None, otherwise it must be an Operator with the
        # traits we use below.
        detpointing = proposal["value"]
        if detpointing is not None:
            if not isinstance(detpointing, Operator):
                raise traitlets.TraitError(
                    "detector_pointing should be an Operator instance"
                )
            # Check that this operator has the traits we expect
            for trt in [
                "view",
                "boresight",
                "shared_flags",
                "shared_flag_mask",
                "quats",
                "coord_in",
                "coord_out",
            ]:
                if not detpointing.has_trait(trt):
                    msg = f"detector_pointing operator should have a '{trt}' trait"
                    raise traitlets.TraitError(msg)
        return detpointing

    @traitlets.validate("stokes_weights")
    def _check_stokes_weights(self, proposal):
        # Value may be None, otherwise it must be an Operator with the
        # traits we use below.
        weights = proposal["value"]
        if weights is not None:
            if not isinstance(weights, Operator):
                raise traitlets.TraitError(
                    "stokes_weights should be an Operator instance"
                )
            # Check that this operator has the traits we expect
            for trt in ["weights", "view"]:
                if not weights.has_trait(trt):
                    msg = f"stokes_weights operator should have a '{trt}' trait"
                    raise traitlets.TraitError(msg)
        return weights

    def __init__(self, **kwargs):
        self.map_names = []
        self.maps = {}
        super().__init__(**kwargs)

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Load the map(s) into shared memory and interpolate into timestreams.

        Raises:
            RuntimeError:  If required traits are unset, the detdata / file
                counts are inconsistent, or the Stokes mode is unknown.
        """
        log = Logger.get()

        for trait in ("file", "detector_pointing", "stokes_weights"):
            if getattr(self, trait) is None:
                msg = f"You must set the '{trait}' trait before calling exec()"
                raise RuntimeError(msg)

        # Split up the file and map names.  Either a single detdata key
        # receives all maps (accumulated), or each map has its own key.
        self.file_names = self.file.split(";")
        nmap = len(self.file_names)
        self.det_data_keys = self.det_data.split(";")
        nkey = len(self.det_data_keys)
        if nkey != 1 and (nmap != nkey):
            msg = "If multiple detdata keys are provided, each must have its own map"
            raise RuntimeError(msg)
        self.map_names = [f"{self.name}_map{i}" for i in range(nmap)]

        # Determine the number of non-zeros from the Stokes weights
        nnz = None
        if self.stokes_weights is None or self.stokes_weights.mode == "I":
            nnz = 1
        elif self.stokes_weights.mode == "IQU":
            nnz = 3
        else:
            msg = f"Unknown Stokes weights mode '{self.stokes_weights.mode}'"
            raise RuntimeError(msg)

        # Create our map(s) to scan named after our own operator name.  Generally the
        # files on disk are stored as float32, but even if not there is no real benefit
        # to having higher precision to simulated map signal that is projected into
        # timestreams.

        world_comm = data.comm.comm_world
        if world_comm is None:
            world_rank = 0
        else:
            world_rank = world_comm.rank

        for file_name, map_name in zip(self.file_names, self.map_names):
            if map_name not in self.maps:
                # Rank zero reads the map, then the shape is broadcast and the
                # data placed in shared memory on every node.
                if world_rank == 0:
                    m = np.atleast_2d(read_healpix(file_name, None, dtype=np.float32))
                    map_shape = m.shape
                else:
                    m = None
                    map_shape = None
                if world_comm is not None:
                    map_shape = world_comm.bcast(map_shape)
                self.maps[map_name] = MPIShared(map_shape, np.float32, world_comm)
                self.maps[map_name].set(m)

        # Loop over all observations and local detectors, interpolating each map
        for ob in data.obs:
            # Get the detectors we are using for this observation
            dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
            if len(dets) == 0:
                # Nothing to do for this observation
                continue
            for key in self.det_data_keys:
                # If our output detector data does not yet exist, create it
                exists_data = ob.detdata.ensure(
                    key, detectors=dets, create_units=self.det_data_units
                )
                if self.zero:
                    ob.detdata[key][:] = 0

            ob_data = data.select(obs_name=ob.name)
            current_ob = ob_data.obs[0]
            for det in dets:
                self.detector_pointing.apply(ob_data, detectors=[det])
                self.stokes_weights.apply(ob_data, detectors=[det])
                det_quat = current_ob.detdata[self.detector_pointing.quats][det]
                # Convert pointing quaternion into angles
                theta, phi, _ = qa.to_iso_angles(det_quat)
                # Get pointing weights
                weights = current_ob.detdata[self.stokes_weights.weights][det]

                # Interpolate the provided maps and accumulate the
                # appropriate timestreams in the original observation.
                # BUGFIX:  the map index must be enumerated here; the previous
                # code referenced an undefined 'imap' and raised NameError when
                # multiple detdata keys were used.
                for imap, (map_name, map_value) in enumerate(self.maps.items()):
                    if len(self.det_data_keys) == 1:
                        det_data_key = self.det_data_keys[0]
                    else:
                        det_data_key = self.det_data_keys[imap]
                    ref = ob.detdata[det_data_key][det]
                    nside = hp.get_nside(map_value)
                    interp_pix, interp_weight = hp.pixelfunc.get_interp_weights(
                        nside,
                        theta,
                        phi,
                        nest=False,
                        lonlat=False,
                    )
                    sig = np.zeros_like(ref)
                    for inz, map_column in enumerate(map_value):
                        sig += weights[:, inz] * np.sum(
                            map_column[interp_pix] * interp_weight, 0
                        )
                    if self.subtract:
                        ref -= sig
                    else:
                        ref += sig

        # Clean up our map, if needed
        if not self.save_map:
            for map_name in self.map_names:
                del self.maps[map_name]

        return

    def _finalize(self, data, **kwargs):
        return

    def _requires(self):
        req = self.detector_pointing.requires()
        req.update(self.stokes_weights.requires())
        return req

    def _provides(self):
        prov = {"global": list(), "detdata": [self.det_data]}
        if self.save_map:
            prov["global"] = self.map_names
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, help="Observation detdata key for accumulating output. Use ';' if different files are applied to different flavors") class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

det_mask = Int(defaults.det_mask_invalid, help='Bit mask value for per-detector flagging') class-attribute instance-attribute

detector_pointing = Instance(klass=Operator, allow_none=True, help='Operator that translates boresight pointing into detector frame') class-attribute instance-attribute

file = Unicode(None, allow_none=True, help="Path to healpix FITS file. Use ';' if providing multiple files") class-attribute instance-attribute

map_names = [] instance-attribute

maps = {} instance-attribute

save_map = Bool(False, help='If True, do not delete map during finalize') class-attribute instance-attribute

stokes_weights = Instance(klass=Operator, allow_none=True, help='This must be an instance of a Stokes weights operator') class-attribute instance-attribute

subtract = Bool(False, help='If True, subtract the map timestream instead of accumulating') class-attribute instance-attribute

zero = Bool(False, help='If True, zero the data before accumulating / subtracting') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/interpolate_healpix.py
123
124
125
126
def __init__(self, **kwargs):
    """Initialize the per-instance map bookkeeping, then apply traits."""
    # These are populated during _exec.
    self.map_names = list()
    self.maps = dict()
    super().__init__(**kwargs)

_check_det_mask(proposal)

Source code in toast/ops/interpolate_healpix.py
78
79
80
81
82
83
@traitlets.validate("det_mask")
def _check_det_mask(self, proposal):
    """Reject negative values for the per-detector flag bit mask."""
    value = proposal["value"]
    if value < 0:
        raise traitlets.TraitError("Det mask should be a positive integer")
    return value

_check_detector_pointing(proposal)

Source code in toast/ops/interpolate_healpix.py
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
@traitlets.validate("detector_pointing")
def _check_detector_pointing(self, proposal):
    """Validate a proposed detector_pointing value.

    None is accepted; otherwise the value must be an Operator exposing the
    full set of detector pointing traits listed below.
    """
    candidate = proposal["value"]
    if candidate is None:
        return candidate
    if not isinstance(candidate, Operator):
        raise traitlets.TraitError(
            "detector_pointing should be an Operator instance"
        )
    # Every detector pointing operator must expose these traits
    required_traits = (
        "view",
        "boresight",
        "shared_flags",
        "shared_flag_mask",
        "quats",
        "coord_in",
        "coord_out",
    )
    for required in required_traits:
        if not candidate.has_trait(required):
            msg = f"detector_pointing operator should have a '{required}' trait"
            raise traitlets.TraitError(msg)
    return candidate

_check_stokes_weights(proposal)

Source code in toast/ops/interpolate_healpix.py
108
109
110
111
112
113
114
115
116
117
118
119
120
121
@traitlets.validate("stokes_weights")
def _check_stokes_weights(self, proposal):
    """Validate a proposed stokes_weights value.

    None is accepted; otherwise the value must be an Operator that exposes
    the 'weights' and 'view' traits.
    """
    candidate = proposal["value"]
    if candidate is None:
        return candidate
    if not isinstance(candidate, Operator):
        raise traitlets.TraitError(
            "stokes_weights should be an Operator instance"
        )
    # Verify that the expected trait names are present on the operator
    for required in ("weights", "view"):
        if not candidate.has_trait(required):
            msg = f"stokes_weights operator should have a '{required}' trait"
            raise traitlets.TraitError(msg)
    return candidate

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/interpolate_healpix.py
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Interpolate the configured HEALPix map(s) into detector timestreams.

    Each file in the semicolon-separated ``file`` trait is read once on the
    world-rank-zero process, shared across the communicator, and then sampled
    with bilinear interpolation at each detector pointing sample.  The
    interpolated signal is accumulated into (or subtracted from) the
    detector data keys listed in ``det_data``.
    """
    log = Logger.get()

    # These traits have no usable defaults; fail early if any is unset.
    for trait in ("file", "detector_pointing", "stokes_weights"):
        if getattr(self, trait) is None:
            msg = f"You must set the '{trait}' trait before calling exec()"
            raise RuntimeError(msg)

    # Split up the file and map names
    self.file_names = self.file.split(";")
    nmap = len(self.file_names)
    self.det_data_keys = self.det_data.split(";")
    nkey = len(self.det_data_keys)
    if nkey != 1 and (nmap != nkey):
        msg = "If multiple detdata keys are provided, each must have its own map"
        raise RuntimeError(msg)
    self.map_names = [f"{self.name}_map{i}" for i in range(nmap)]

    # Determine the number of non-zeros from the Stokes weights
    nnz = None
    if self.stokes_weights is None or self.stokes_weights.mode == "I":
        nnz = 1
    elif self.stokes_weights.mode == "IQU":
        nnz = 3
    else:
        msg = f"Unknown Stokes weights mode '{self.stokes_weights.mode}'"
        raise RuntimeError(msg)

    # Create our map(s) to scan named after our own operator name.  Generally the
    # files on disk are stored as float32, but even if not there is no real benefit
    # to having higher precision to simulated map signal that is projected into
    # timestreams.

    world_comm = data.comm.comm_world
    if world_comm is None:
        world_rank = 0
    else:
        world_rank = world_comm.rank

    for file_name, map_name in zip(self.file_names, self.map_names):
        if map_name not in self.maps:
            # Only rank zero reads the file; the shape is broadcast so that
            # every process can participate in the shared allocation.
            if world_rank == 0:
                m = np.atleast_2d(read_healpix(file_name, None, dtype=np.float32))
                map_shape = m.shape
            else:
                m = None
                map_shape = None
            if world_comm is not None:
                map_shape = world_comm.bcast(map_shape)
            self.maps[map_name] = MPIShared(map_shape, np.float32, world_comm)
            self.maps[map_name].set(m)

    # Loop over all observations and local detectors, interpolating each map
    for ob in data.obs:
        # Get the detectors we are using for this observation
        dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
        if len(dets) == 0:
            # Nothing to do for this observation
            continue
        for key in self.det_data_keys:
            # If our output detector data does not yet exist, create it
            exists_data = ob.detdata.ensure(
                key, detectors=dets, create_units=self.det_data_units
            )
            if self.zero:
                ob.detdata[key][:] = 0

        ob_data = data.select(obs_name=ob.name)
        current_ob = ob_data.obs[0]
        for det in dets:
            self.detector_pointing.apply(ob_data, detectors=[det])
            self.stokes_weights.apply(ob_data, detectors=[det])
            det_quat = current_ob.detdata[self.detector_pointing.quats][det]
            # Convert pointing quaternion into angles
            theta, phi, _ = qa.to_iso_angles(det_quat)
            # Get pointing weights
            weights = current_ob.detdata[self.stokes_weights.weights][det]

            # Interpolate the provided maps and accumulate the
            # appropriate timestreams in the original observation.
            # BUGFIX: enumerate the maps so that `imap` is defined when each
            # detdata key has its own map; previously this raised NameError.
            for imap, (map_name, map_value) in enumerate(self.maps.items()):
                if len(self.det_data_keys) == 1:
                    det_data_key = self.det_data_keys[0]
                else:
                    det_data_key = self.det_data_keys[imap]
                ref = ob.detdata[det_data_key][det]
                nside = hp.get_nside(map_value)
                interp_pix, interp_weight = hp.pixelfunc.get_interp_weights(
                    nside,
                    theta,
                    phi,
                    nest=False,
                    lonlat=False,
                )
                sig = np.zeros_like(ref)
                for inz, map_column in enumerate(map_value):
                    sig += weights[:, inz] * np.sum(
                        map_column[interp_pix] * interp_weight, 0
                    )
                if self.subtract:
                    ref -= sig
                else:
                    ref += sig

    # Clean up our map, if needed
    if not self.save_map:
        for map_name in self.map_names:
            del self.maps[map_name]

    return

_finalize(data, **kwargs)

Source code in toast/ops/interpolate_healpix.py
240
241
def _finalize(self, data, **kwargs):
    return

_provides()

Source code in toast/ops/interpolate_healpix.py
248
249
250
251
252
def _provides(self):
    prov = {"global": list(), "detdata": [self.det_data]}
    if self.save_map:
        prov["global"] = self.map_names
    return prov

_requires()

Source code in toast/ops/interpolate_healpix.py
243
244
245
246
def _requires(self):
    req = self.detector_pointing.requires()
    req.update(self.stokes_weights.requires())
    return req

Scanning a WCS Projected Map

toast.ops.ScanWCSMap

Bases: Operator

Operator which reads a WCS format map from disk and scans it to a timestream.

The map file is loaded and distributed among the processes. For each observation, the pointing model is used to expand the pointing and scan the map values into detector data.

Source code in toast/ops/scan_wcs.py
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
@trait_docs
class ScanWCSMap(Operator):
    """Operator which reads a WCS format map from disk and scans it to a timestream.

    The map file is loaded and distributed among the processes.  For each observation,
    the pointing model is used to expand the pointing and scan the map values into
    detector data.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    file = Unicode(None, allow_none=True, help="Path to FITS file")

    det_data = Unicode(
        defaults.det_data, help="Observation detdata key for accumulating output"
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    det_mask = Int(
        defaults.det_mask_invalid,
        help="Bit mask value for per-detector flagging",
    )

    subtract = Bool(
        False, help="If True, subtract the map timestream instead of accumulating"
    )

    zero = Bool(False, help="If True, zero the data before accumulating / subtracting")

    pixel_dist = Unicode(
        "pixel_dist",
        help="The Data key where the PixelDistribution object is located",
    )

    pixel_pointing = Instance(
        klass=Operator,
        allow_none=True,
        help="This must be an instance of a pixel pointing operator",
    )

    stokes_weights = Instance(
        klass=Operator,
        allow_none=True,
        help="This must be an instance of a Stokes weights operator",
    )

    save_map = Bool(False, help="If True, do not delete map during finalize")

    save_pointing = Bool(
        False,
        help="If True, do not clear detector pointing matrices if we "
        "generate the pixel distribution",
    )

    @traitlets.validate("det_mask")
    def _check_det_mask(self, proposal):
        # Bit masks must be non-negative.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det mask should be a positive integer")
        return check

    @traitlets.validate("pixel_pointing")
    def _check_pixel_pointing(self, proposal):
        pixels = proposal["value"]
        if pixels is not None:
            if not isinstance(pixels, Operator):
                raise traitlets.TraitError(
                    "pixel_pointing should be an Operator instance"
                )
            # Check that this operator has the traits we expect
            for trt in ["pixels", "create_dist", "view"]:
                if not pixels.has_trait(trt):
                    msg = f"pixel_pointing operator should have a '{trt}' trait"
                    raise traitlets.TraitError(msg)
        return pixels

    @traitlets.validate("stokes_weights")
    def _check_stokes_weights(self, proposal):
        weights = proposal["value"]
        if weights is not None:
            if not isinstance(weights, Operator):
                raise traitlets.TraitError(
                    "stokes_weights should be an Operator instance"
                )
            # Check that this operator has the traits we expect
            for trt in ["weights", "view"]:
                if not weights.has_trait(trt):
                    msg = f"stokes_weights operator should have a '{trt}' trait"
                    raise traitlets.TraitError(msg)
        return weights

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Data key under which the scanned map is stored, derived from the
        # operator instance name so multiple instances do not collide.
        self.map_name = "{}_map".format(self.name)

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Read the WCS map once and scan it into detector timestreams."""
        log = Logger.get()

        # Check that the file is set
        if self.file is None:
            raise RuntimeError("You must set the file trait before calling exec()")

        # Construct the pointing distribution if it does not already exist

        if self.pixel_dist not in data:
            pix_dist = BuildPixelDistribution(
                pixel_dist=self.pixel_dist,
                pixel_pointing=self.pixel_pointing,
                save_pointing=self.save_pointing,
            )
            pix_dist.apply(data)

        dist = data[self.pixel_dist]
        if not isinstance(dist, PixelDistribution):
            raise RuntimeError("The pixel_dist must be a PixelDistribution instance")

        # Use the pixel distribution and pointing configuration to allocate our
        # map data and read it in.  nnz is the number of map components
        # (1 for intensity-only, 3 for IQU).
        nnz = None
        if self.stokes_weights is None or self.stokes_weights.mode == "I":
            nnz = 1
        elif self.stokes_weights.mode == "IQU":
            nnz = 3
        else:
            msg = "Unknown Stokes weights mode '{}'".format(self.stokes_weights.mode)
            raise RuntimeError(msg)

        # Create our map to scan named after our own operator name.  Generally the
        # files on disk are stored as float32, but even if not there is no real benefit
        # to having higher precision to simulated map signal that is projected into
        # timestreams.
        if self.map_name not in data:
            data[self.map_name] = PixelData(
                dist, dtype=np.float32, n_value=nnz, units=self.det_data_units
            )
            read_wcs_fits(data[self.map_name], self.file)

        # The pipeline below will run one detector at a time in case we are computing
        # pointing.  Make sure that our full set of requested detector output exists.
        # FIXME:  This seems like a common pattern, maybe move to a "Create" operator?
        for ob in data.obs:
            # Get the detectors we are using for this observation
            dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
            if len(dets) == 0:
                # Nothing to do for this observation
                continue
            # If our output detector data does not yet exist, create it
            exists_data = ob.detdata.ensure(
                self.det_data, detectors=dets, create_units=self.det_data_units
            )

        # Configure the low-level map scanning operator

        scanner = ScanMap(
            det_data=self.det_data,
            det_data_units=self.det_data_units,
            pixels=self.pixel_pointing.pixels,
            weights=self.stokes_weights.weights,
            map_key=self.map_name,
            subtract=self.subtract,
            zero=self.zero,
        )

        # Build and run a pipeline that scans from our map
        scan_pipe = Pipeline(
            detector_sets=["SINGLE"],
            operators=[self.pixel_pointing, self.stokes_weights, scanner],
        )
        scan_pipe.apply(data, detectors=detectors)

        return

    def _finalize(self, data, **kwargs):
        # Clean up our map, if needed
        if not self.save_map:
            data[self.map_name].clear()
            del data[self.map_name]
        return

    def _requires(self):
        # Everything needed by the pointing and weights operators.
        req = self.pixel_pointing.requires()
        req.update(self.stokes_weights.requires())
        return req

    def _provides(self):
        # Detector data is always produced; the map key only if retained.
        prov = {"global": list(), "detdata": [self.det_data]}
        if self.save_map:
            prov["global"] = [self.map_name]
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, help='Observation detdata key for accumulating output') class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

det_mask = Int(defaults.det_mask_invalid, help='Bit mask value for per-detector flagging') class-attribute instance-attribute

file = Unicode(None, allow_none=True, help='Path to FITS file') class-attribute instance-attribute

map_name = '{}_map'.format(self.name) instance-attribute

pixel_dist = Unicode('pixel_dist', help='The Data key where the PixelDistribution object is located') class-attribute instance-attribute

pixel_pointing = Instance(klass=Operator, allow_none=True, help='This must be an instance of a pixel pointing operator') class-attribute instance-attribute

save_map = Bool(False, help='If True, do not delete map during finalize') class-attribute instance-attribute

save_pointing = Bool(False, help='If True, do not clear detector pointing matrices if we generate the pixel distribution') class-attribute instance-attribute

stokes_weights = Instance(klass=Operator, allow_none=True, help='This must be an instance of a Stokes weights operator') class-attribute instance-attribute

subtract = Bool(False, help='If True, subtract the map timestream instead of accumulating') class-attribute instance-attribute

zero = Bool(False, help='If True, zero the data before accumulating / subtracting') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/scan_wcs.py
118
119
120
def __init__(self, **kwargs):
    # Let the Operator base class process traits first, then derive the
    # internal Data key used to store the scanned map.
    super().__init__(**kwargs)
    self.map_name = f"{self.name}_map"

_check_det_mask(proposal)

Source code in toast/ops/scan_wcs.py
81
82
83
84
85
86
@traitlets.validate("det_mask")
def _check_det_mask(self, proposal):
    # A detector bit mask can never be negative.
    candidate = proposal["value"]
    if candidate < 0:
        raise traitlets.TraitError("Det mask should be a positive integer")
    return candidate

_check_pixel_pointing(proposal)

Source code in toast/ops/scan_wcs.py
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
@traitlets.validate("pixel_pointing")
def _check_pixel_pointing(self, proposal):
    """Require pixel_pointing to be an Operator with pixels/create_dist/view traits."""
    op = proposal["value"]
    if op is None:
        return op
    if not isinstance(op, Operator):
        raise traitlets.TraitError(
            "pixel_pointing should be an Operator instance"
        )
    for trt in ("pixels", "create_dist", "view"):
        if not op.has_trait(trt):
            raise traitlets.TraitError(
                f"pixel_pointing operator should have a '{trt}' trait"
            )
    return op

_check_stokes_weights(proposal)

Source code in toast/ops/scan_wcs.py
103
104
105
106
107
108
109
110
111
112
113
114
115
116
@traitlets.validate("stokes_weights")
def _check_stokes_weights(self, proposal):
    """Require stokes_weights to be an Operator with 'weights' and 'view' traits."""
    op = proposal["value"]
    if op is None:
        return op
    if not isinstance(op, Operator):
        raise traitlets.TraitError(
            "stokes_weights should be an Operator instance"
        )
    missing = [t for t in ("weights", "view") if not op.has_trait(t)]
    if missing:
        raise traitlets.TraitError(
            f"stokes_weights operator should have a '{missing[0]}' trait"
        )
    return op

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/scan_wcs.py
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Read the WCS map once and scan it into detector timestreams."""
    log = Logger.get()

    # Check that the file is set
    if self.file is None:
        raise RuntimeError("You must set the file trait before calling exec()")

    # Construct the pointing distribution if it does not already exist

    if self.pixel_dist not in data:
        pix_dist = BuildPixelDistribution(
            pixel_dist=self.pixel_dist,
            pixel_pointing=self.pixel_pointing,
            save_pointing=self.save_pointing,
        )
        pix_dist.apply(data)

    dist = data[self.pixel_dist]
    if not isinstance(dist, PixelDistribution):
        raise RuntimeError("The pixel_dist must be a PixelDistribution instance")

    # Use the pixel distribution and pointing configuration to allocate our
    # map data and read it in.  nnz is the number of map components
    # (1 for intensity-only, 3 for IQU).
    nnz = None
    if self.stokes_weights is None or self.stokes_weights.mode == "I":
        nnz = 1
    elif self.stokes_weights.mode == "IQU":
        nnz = 3
    else:
        msg = "Unknown Stokes weights mode '{}'".format(self.stokes_weights.mode)
        raise RuntimeError(msg)

    # Create our map to scan named after our own operator name.  Generally the
    # files on disk are stored as float32, but even if not there is no real benefit
    # to having higher precision to simulated map signal that is projected into
    # timestreams.
    if self.map_name not in data:
        data[self.map_name] = PixelData(
            dist, dtype=np.float32, n_value=nnz, units=self.det_data_units
        )
        read_wcs_fits(data[self.map_name], self.file)

    # The pipeline below will run one detector at a time in case we are computing
    # pointing.  Make sure that our full set of requested detector output exists.
    # FIXME:  This seems like a common pattern, maybe move to a "Create" operator?
    for ob in data.obs:
        # Get the detectors we are using for this observation
        dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
        if len(dets) == 0:
            # Nothing to do for this observation
            continue
        # If our output detector data does not yet exist, create it
        exists_data = ob.detdata.ensure(
            self.det_data, detectors=dets, create_units=self.det_data_units
        )

    # Configure the low-level map scanning operator

    scanner = ScanMap(
        det_data=self.det_data,
        det_data_units=self.det_data_units,
        pixels=self.pixel_pointing.pixels,
        weights=self.stokes_weights.weights,
        map_key=self.map_name,
        subtract=self.subtract,
        zero=self.zero,
    )

    # Build and run a pipeline that scans from our map
    scan_pipe = Pipeline(
        detector_sets=["SINGLE"],
        operators=[self.pixel_pointing, self.stokes_weights, scanner],
    )
    scan_pipe.apply(data, detectors=detectors)

    return

_finalize(data, **kwargs)

Source code in toast/ops/scan_wcs.py
200
201
202
203
204
205
def _finalize(self, data, **kwargs):
    # Clean up our map, if needed
    if not self.save_map:
        data[self.map_name].clear()
        del data[self.map_name]
    return

_provides()

Source code in toast/ops/scan_wcs.py
212
213
214
215
216
def _provides(self):
    prov = {"global": list(), "detdata": [self.det_data]}
    if self.save_map:
        prov["global"] = [self.map_name]
    return prov

_requires()

Source code in toast/ops/scan_wcs.py
207
208
209
210
def _requires(self):
    req = self.pixel_pointing.requires()
    req.update(self.stokes_weights.requires())
    return req

toast.ops.ScanWCSMask

Bases: Operator

Operator which reads a WCS mask from disk and scans it to a timestream.

The mask file is loaded and distributed among the processes. For each observation, the pointing model is used to expand the pointing and scan the mask values into detector data.

Source code in toast/ops/scan_wcs.py
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
@trait_docs
class ScanWCSMask(Operator):
    """Operator which reads a WCS mask from disk and scans it to a timestream.

    The mask file is loaded and distributed among the processes.  For each observation,
    the pointing model is used to expand the pointing and scan the mask values into
    detector data.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    file = Unicode(None, allow_none=True, help="Path to FITS file")

    det_flags = Unicode(
        defaults.det_flags,
        allow_none=True,
        help="Observation detdata key for flags to use",
    )

    det_flags_value = Int(
        defaults.det_mask_processing,
        help="The detector flag value to set where the mask result is non-zero",
    )

    det_mask = Int(
        defaults.det_mask_invalid,
        help="Bit mask value for per-detector flagging",
    )

    mask_bits = Int(
        255, help="The number to bitwise-and with each mask value to form the result"
    )

    pixel_dist = Unicode(
        "pixel_dist",
        help="The Data key where the PixelDistribution object is located",
    )

    pixel_pointing = Instance(
        klass=Operator,
        allow_none=True,
        help="This must be an instance of a pixel pointing operator",
    )

    save_mask = Bool(False, help="If True, do not delete mask during finalize")

    save_pointing = Bool(
        False,
        help="If True, do not clear detector pointing matrices if we "
        "generate the pixel distribution",
    )

    @traitlets.validate("det_mask")
    def _check_det_mask(self, proposal):
        # Bit masks must be non-negative.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det mask should be a positive integer")
        return check

    @traitlets.validate("pixel_pointing")
    def _check_pixel_pointing(self, proposal):
        pixels = proposal["value"]
        if pixels is not None:
            if not isinstance(pixels, Operator):
                raise traitlets.TraitError(
                    "pixel_pointing should be an Operator instance"
                )
            # Check that this operator has the traits we expect
            for trt in ["pixels", "create_dist", "view"]:
                if not pixels.has_trait(trt):
                    msg = f"pixel_pointing operator should have a '{trt}' trait"
                    raise traitlets.TraitError(msg)
        return pixels

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Data key under which the mask is stored, derived from the operator
        # instance name so multiple instances do not collide.
        self.mask_name = f"{self.name}_mask"

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Read the WCS mask once and flag samples falling in masked pixels."""
        log = Logger.get()

        # Check that the file is set
        if self.file is None:
            raise RuntimeError("You must set the file trait before calling exec()")

        # Construct the pointing distribution if it does not already exist

        if self.pixel_dist not in data:
            pix_dist = BuildPixelDistribution(
                pixel_dist=self.pixel_dist,
                pixel_pointing=self.pixel_pointing,
                save_pointing=self.save_pointing,
            )
            pix_dist.apply(data)

        dist = data[self.pixel_dist]
        if not isinstance(dist, PixelDistribution):
            raise RuntimeError("The pixel_dist must be a PixelDistribution instance")

        # Create our mask to scan, named after our own operator name.  Masks
        # are single-component uint8 pixel data.
        if self.mask_name not in data:
            data[self.mask_name] = PixelData(dist, dtype=np.uint8, n_value=1)
            read_wcs_fits(data[self.mask_name], self.file)

        # The pipeline below will run one detector at a time in case we are computing
        # pointing.  Make sure that our full set of requested detector output exists.
        # FIXME:  This seems like a common pattern, maybe move to a "Create" operator?
        for ob in data.obs:
            # Get the detectors we are using for this observation
            dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
            if len(dets) == 0:
                # Nothing to do for this observation
                continue
            # If our output detector flags do not yet exist, create them
            exists_flags = ob.detdata.ensure(
                self.det_flags, dtype=np.uint8, detectors=dets
            )

        # Configure the low-level mask scanning operator

        scanner = ScanMask(
            det_flags=self.det_flags,
            det_flags_value=self.det_flags_value,
            pixels=self.pixel_pointing.pixels,
            mask_key=self.mask_name,
            mask_bits=self.mask_bits,
        )

        # Build and run a pipeline that scans from our mask
        scan_pipe = Pipeline(
            detector_sets=["SINGLE"],
            operators=[self.pixel_pointing, scanner],
        )
        scan_pipe.apply(data, detectors=detectors)

        return

    def _finalize(self, data, **kwargs):
        # Clean up our mask, if needed
        if not self.save_mask:
            data[self.mask_name].clear()
            del data[self.mask_name]
        return

    def _requires(self):
        # BUGFIX: this class has no 'stokes_weights' trait (the previous code
        # called self.stokes_weights.requires(), raising AttributeError).
        # Only the pixel pointing expansion is required to scan a mask.
        req = self.pixel_pointing.requires()
        return req

    def _provides(self):
        # BUGFIX: report the detector flags we modify and the mask key we may
        # retain.  Previously this referenced nonexistent 'det_data',
        # 'save_map' and 'map_name' attributes copied from ScanWCSMap.
        prov = {"global": list(), "detdata": [self.det_flags]}
        if self.save_mask:
            prov["global"] = [self.mask_name]
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

det_flags = Unicode(defaults.det_flags, allow_none=True, help='Observation detdata key for flags to use') class-attribute instance-attribute

det_flags_value = Int(defaults.det_mask_processing, help='The detector flag value to set where the mask result is non-zero') class-attribute instance-attribute

det_mask = Int(defaults.det_mask_invalid, help='Bit mask value for per-detector flagging') class-attribute instance-attribute

file = Unicode(None, allow_none=True, help='Path to FITS file') class-attribute instance-attribute

mask_bits = Int(255, help='The number to bitwise-and with each mask value to form the result') class-attribute instance-attribute

mask_name = f'{self.name}_mask' instance-attribute

pixel_dist = Unicode('pixel_dist', help='The Data key where the PixelDistribution object is located') class-attribute instance-attribute

pixel_pointing = Instance(klass=Operator, allow_none=True, help='This must be an instance of a pixel pointing operator') class-attribute instance-attribute

save_mask = Bool(False, help='If True, do not delete mask during finalize') class-attribute instance-attribute

save_pointing = Bool(False, help='If True, do not clear detector pointing matrices if we generate the pixel distribution') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/scan_wcs.py
296
297
298
def __init__(self, **kwargs):
    # Initialize traits via the base class, then build the Data key
    # under which the mask will be stored.
    super().__init__(**kwargs)
    self.mask_name = "{}_mask".format(self.name)

_check_det_mask(proposal)

Source code in toast/ops/scan_wcs.py
274
275
276
277
278
279
@traitlets.validate("det_mask")
def _check_det_mask(self, proposal):
    # Reject negative bit-mask values up front.
    proposed = proposal["value"]
    if proposed < 0:
        raise traitlets.TraitError("Det mask should be a positive integer")
    return proposed

_check_pixel_pointing(proposal)

Source code in toast/ops/scan_wcs.py
281
282
283
284
285
286
287
288
289
290
291
292
293
294
@traitlets.validate("pixel_pointing")
def _check_pixel_pointing(self, proposal):
    """Require pixel_pointing to be an Operator with pixels/create_dist/view traits."""
    op = proposal["value"]
    if op is None:
        return op
    if not isinstance(op, Operator):
        raise traitlets.TraitError(
            "pixel_pointing should be an Operator instance"
        )
    missing = [t for t in ("pixels", "create_dist", "view") if not op.has_trait(t)]
    if missing:
        raise traitlets.TraitError(
            f"pixel_pointing operator should have a '{missing[0]}' trait"
        )
    return op

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/scan_wcs.py
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Scan a mask from a WCS FITS file into per-sample detector flags.

    Builds the pixel distribution if it is missing, reads the mask into a
    uint8 PixelData object, then runs a single-detector Pipeline that
    computes pointing and applies a ScanMask operator.
    """
    log = Logger.get()

    # Check that the file is set
    if self.file is None:
        raise RuntimeError("You must set the file trait before calling exec()")

    # Construct the pointing distribution if it does not already exist

    if self.pixel_dist not in data:
        pix_dist = BuildPixelDistribution(
            pixel_dist=self.pixel_dist,
            pixel_pointing=self.pixel_pointing,
            save_pointing=self.save_pointing,
        )
        pix_dist.apply(data)

    dist = data[self.pixel_dist]
    if not isinstance(dist, PixelDistribution):
        raise RuntimeError("The pixel_dist must be a PixelDistribution instance")

    # Create the mask to scan, named after our own operator name.  The mask
    # holds a single uint8 value per pixel, read from the WCS FITS file.
    if self.mask_name not in data:
        data[self.mask_name] = PixelData(dist, dtype=np.uint8, n_value=1)
        read_wcs_fits(data[self.mask_name], self.file)

    # The pipeline below will run one detector at a time in case we are computing
    # pointing.  Make sure that our full set of requested detector output exists.
    # FIXME:  This seems like a common pattern, maybe move to a "Create" operator?
    for ob in data.obs:
        # Get the detectors we are using for this observation
        dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
        if len(dets) == 0:
            # Nothing to do for this observation
            continue
        # If our output detector data does not yet exist, create it
        exists_flags = ob.detdata.ensure(
            self.det_flags, dtype=np.uint8, detectors=dets
        )

    # Configure the low-level map scanning operator

    scanner = ScanMask(
        det_flags=self.det_flags,
        det_flags_value=self.det_flags_value,
        pixels=self.pixel_pointing.pixels,
        mask_key=self.mask_name,
        mask_bits=self.mask_bits,
    )

    # Build and run a pipeline that scans from our mask one detector at a time.
    scan_pipe = Pipeline(
        detector_sets=["SINGLE"],
        operators=[self.pixel_pointing, scanner],
    )
    scan_pipe.apply(data, detectors=detectors)

    return

_finalize(data, **kwargs)

Source code in toast/ops/scan_wcs.py
363
364
365
366
367
368
def _finalize(self, data, **kwargs):
    # Clean up our map, if needed
    if not self.save_mask:
        data[self.mask_name].clear()
        del data[self.mask_name]
    return

_provides()

Source code in toast/ops/scan_wcs.py
375
376
377
378
379
def _provides(self):
    prov = {"global": list(), "detdata": [self.det_data]}
    if self.save_map:
        prov["global"] = [self.map_name]
    return prov

_requires()

Source code in toast/ops/scan_wcs.py
370
371
372
373
def _requires(self):
    req = self.pixel_pointing.requires()
    req.update(self.stokes_weights.requires())
    return req

Scanning an Arbitrary Map

toast.ops.ScanMap

Bases: Operator

Operator which uses the pointing matrix to scan timestream values from a map.

The map must be a PixelData instance with either float32 or float64 values. The values can either be accumulated or subtracted from the input timestream, and the input timestream can be optionally zeroed out beforehand.

Source code in toast/ops/scan_map/scan_map.py
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
@trait_docs
class ScanMap(Operator):
    """Operator which uses the pointing matrix to scan timestream values from a map.

    The map must be a PixelData instance with either float32 or float64 values.  The
    values can either be accumulated or subtracted from the input timestream, and the
    input timestream can be optionally zeroed out beforehand.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    det_data = Unicode(
        defaults.det_data, help="Observation detdata key for the timestream data"
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    det_mask = Int(
        defaults.det_mask_invalid,
        help="Bit mask value for per-detector flagging",
    )

    det_flag_mask = Int(
        defaults.det_mask_invalid,
        help="Bit mask value for detector sample flagging",
    )

    view = Unicode(
        None, allow_none=True, help="Use this view of the data in all observations"
    )

    pixels = Unicode(defaults.pixels, help="Observation detdata key for pixel indices")

    weights = Unicode(
        defaults.weights,
        allow_none=True,
        help="Observation detdata key for Stokes weights",
    )

    map_key = Unicode(
        None,
        allow_none=True,
        help="The Data key where the map is located",
    )

    subtract = Bool(
        False, help="If True, subtract the map timestream instead of accumulating"
    )

    zero = Bool(False, help="If True, zero the data before accumulating / subtracting")

    @traitlets.validate("det_mask")
    def _check_det_mask(self, proposal):
        # Bit masks must be non-negative integers.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det mask should be a positive integer")
        return check

    @traitlets.validate("det_flag_mask")
    def _check_det_flag_mask(self, proposal):
        # Bit masks must be non-negative integers.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det flag mask should be a positive integer")
        return check

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @function_timer
    def _exec(self, data, detectors=None, use_accel=None, **kwargs):
        """Scan map values into detector timestreams for every observation."""
        log = Logger.get()

        # Kernel selection
        implementation, use_accel = self.select_kernels(use_accel=use_accel)

        # Check that the detector data is set
        if self.det_data is None:
            raise RuntimeError("You must set the det_data trait before calling exec()")

        # Check that the map is set
        if self.map_key is None:
            raise RuntimeError("You must set the map_key trait before calling exec()")
        if self.map_key not in data:
            msg = "The map_key '{}' does not exist in the data".format(self.map_key)
            raise RuntimeError(msg)

        map_data = data[self.map_key]
        if not isinstance(map_data, PixelData):
            raise RuntimeError("The map to scan must be a PixelData instance")
        map_dist = map_data.distribution

        for ob in data.obs:
            # Get the detectors we are using for this observation
            dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
            if len(dets) == 0:
                # Nothing to do for this observation
                continue

            if self.weights is not None:
                # Sanity check the number of non-zeros between the map and the
                # pointing matrix
                check_nnz = 1
                if len(ob.detdata[self.weights].detector_shape) > 1:
                    check_nnz = ob.detdata[self.weights].detector_shape[-1]
                if map_data.n_value != check_nnz:
                    msg = (
                        f"Detector data '{self.weights}' in observation '{ob.name}' "
                        f"has {check_nnz} nnz instead of {map_data.n_value} in the map"
                    )
                    log.error(msg)
                    raise RuntimeError(msg)

            # If our output detector data does not yet exist, create it
            _ = ob.detdata.ensure(
                self.det_data,
                detectors=dets,
                create_units=self.det_data_units,
                accel=use_accel,
            )

            # Raw buffers and per-detector indices handed to the kernel.
            intervals = ob.intervals[self.view].data
            det_data = ob.detdata[self.det_data].data
            det_data_indx = ob.detdata[self.det_data].indices(dets)
            pixels = ob.detdata[self.pixels].data
            pixels_indx = ob.detdata[self.pixels].indices(dets)
            data_scale = unit_conversion(
                map_data.units, ob.detdata[self.det_data].units
            )
            if self.weights is None:
                # Use empty arrays, rather than None, so that we can pass that more
                # easily to compiled kernels that expect a buffer.
                weights = np.array([], dtype=np.float64)
                weight_indx = np.array([], dtype=np.int32)
            else:
                weights = ob.detdata[self.weights].data
                weight_indx = ob.detdata[self.weights].indices(dets)

            # NOTE(review): the trailing False positional argument's meaning is
            # not visible here - confirm against the scan_map kernel signature.
            scan_map(
                map_dist.global_submap_to_local,
                map_dist.n_pix_submap,
                map_data.data,
                det_data,
                det_data_indx,
                pixels,
                pixels_indx,
                weights,
                weight_indx,
                intervals,
                data_scale,
                bool(self.zero),
                bool(self.subtract),
                False,
                impl=implementation,
                use_accel=use_accel,
            )

        return

    def _finalize(self, data, **kwargs):
        # No temporary products to clean up.
        return

    def _requires(self):
        # Inputs: the map in the Data container plus pixel indices and the
        # detector data (optionally weights and a view of intervals).
        req = {
            "global": [self.map_key],
            "meta": list(),
            "shared": list(),
            "detdata": [self.pixels, self.det_data],
            "intervals": list(),
        }
        if self.weights is not None:
            req["detdata"].append(self.weights)
        if self.view is not None:
            req["intervals"].append(self.view)
        return req

    def _provides(self):
        # Output: only the detector timestream data.
        return {"detdata": [self.det_data]}

    def _implementations(self):
        return [
            ImplementationType.DEFAULT,
            ImplementationType.COMPILED,
            ImplementationType.NUMPY,
            ImplementationType.JAX,
        ]

    def _supports_accel(self):
        return True

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, help='Observation detdata key for the timestream data') class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

det_flag_mask = Int(defaults.det_mask_invalid, help='Bit mask value for detector sample flagging') class-attribute instance-attribute

det_mask = Int(defaults.det_mask_invalid, help='Bit mask value for per-detector flagging') class-attribute instance-attribute

map_key = Unicode(None, allow_none=True, help='The Data key where the map is located') class-attribute instance-attribute

pixels = Unicode(defaults.pixels, help='Observation detdata key for pixel indices') class-attribute instance-attribute

subtract = Bool(False, help='If True, subtract the map timestream instead of accumulating') class-attribute instance-attribute

view = Unicode(None, allow_none=True, help='Use this view of the data in all observations') class-attribute instance-attribute

weights = Unicode(defaults.weights, allow_none=True, help='Observation detdata key for Stokes weights') class-attribute instance-attribute

zero = Bool(False, help='If True, zero the data before accumulating / subtracting') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/scan_map/scan_map.py
89
90
def __init__(self, **kwargs):
    # Trait handling is done entirely by the Operator base class.
    super().__init__(**kwargs)

_check_det_flag_mask(proposal)

Source code in toast/ops/scan_map/scan_map.py
82
83
84
85
86
87
@traitlets.validate("det_flag_mask")
def _check_det_flag_mask(self, proposal):
    """Validate that the proposed flag mask is a non-negative integer."""
    value = proposal["value"]
    if value < 0:
        raise traitlets.TraitError("Det flag mask should be a positive integer")
    return value

_check_det_mask(proposal)

Source code in toast/ops/scan_map/scan_map.py
75
76
77
78
79
80
@traitlets.validate("det_mask")
def _check_det_mask(self, proposal):
    """Validate that the proposed detector mask is a non-negative integer."""
    mask = proposal["value"]
    if mask < 0:
        raise traitlets.TraitError("Det mask should be a positive integer")
    return mask

_exec(data, detectors=None, use_accel=None, **kwargs)

Source code in toast/ops/scan_map/scan_map.py
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
@function_timer
def _exec(self, data, detectors=None, use_accel=None, **kwargs):
    """Scan map values into detector timestreams for every observation."""
    log = Logger.get()

    # Kernel selection
    implementation, use_accel = self.select_kernels(use_accel=use_accel)

    # Check that the detector data is set
    if self.det_data is None:
        raise RuntimeError("You must set the det_data trait before calling exec()")

    # Check that the map is set
    if self.map_key is None:
        raise RuntimeError("You must set the map_key trait before calling exec()")
    if self.map_key not in data:
        msg = "The map_key '{}' does not exist in the data".format(self.map_key)
        raise RuntimeError(msg)

    map_data = data[self.map_key]
    if not isinstance(map_data, PixelData):
        raise RuntimeError("The map to scan must be a PixelData instance")
    map_dist = map_data.distribution

    for ob in data.obs:
        # Get the detectors we are using for this observation
        dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
        if len(dets) == 0:
            # Nothing to do for this observation
            continue

        if self.weights is not None:
            # Sanity check the number of non-zeros between the map and the
            # pointing matrix
            check_nnz = 1
            if len(ob.detdata[self.weights].detector_shape) > 1:
                check_nnz = ob.detdata[self.weights].detector_shape[-1]
            if map_data.n_value != check_nnz:
                msg = (
                    f"Detector data '{self.weights}' in observation '{ob.name}' "
                    f"has {check_nnz} nnz instead of {map_data.n_value} in the map"
                )
                log.error(msg)
                raise RuntimeError(msg)

        # If our output detector data does not yet exist, create it
        _ = ob.detdata.ensure(
            self.det_data,
            detectors=dets,
            create_units=self.det_data_units,
            accel=use_accel,
        )

        # Raw buffers and per-detector indices handed to the kernel.
        intervals = ob.intervals[self.view].data
        det_data = ob.detdata[self.det_data].data
        det_data_indx = ob.detdata[self.det_data].indices(dets)
        pixels = ob.detdata[self.pixels].data
        pixels_indx = ob.detdata[self.pixels].indices(dets)
        data_scale = unit_conversion(
            map_data.units, ob.detdata[self.det_data].units
        )
        if self.weights is None:
            # Use empty arrays, rather than None, so that we can pass that more
            # easily to compiled kernels that expect a buffer.
            weights = np.array([], dtype=np.float64)
            weight_indx = np.array([], dtype=np.int32)
        else:
            weights = ob.detdata[self.weights].data
            weight_indx = ob.detdata[self.weights].indices(dets)

        # NOTE(review): the trailing False positional argument's meaning is
        # not visible here - confirm against the scan_map kernel signature.
        scan_map(
            map_dist.global_submap_to_local,
            map_dist.n_pix_submap,
            map_data.data,
            det_data,
            det_data_indx,
            pixels,
            pixels_indx,
            weights,
            weight_indx,
            intervals,
            data_scale,
            bool(self.zero),
            bool(self.subtract),
            False,
            impl=implementation,
            use_accel=use_accel,
        )

    return

_finalize(data, **kwargs)

Source code in toast/ops/scan_map/scan_map.py
182
183
def _finalize(self, data, **kwargs):
    return

_implementations()

Source code in toast/ops/scan_map/scan_map.py
202
203
204
205
206
207
208
def _implementations(self):
    """Return the list of kernel implementations supported by this operator."""
    impls = [
        ImplementationType.DEFAULT,
        ImplementationType.COMPILED,
        ImplementationType.NUMPY,
        ImplementationType.JAX,
    ]
    return impls

_provides()

Source code in toast/ops/scan_map/scan_map.py
199
200
def _provides(self):
    return {"detdata": [self.det_data]}

_requires()

Source code in toast/ops/scan_map/scan_map.py
185
186
187
188
189
190
191
192
193
194
195
196
197
def _requires(self):
    req = {
        "global": [self.map_key],
        "meta": list(),
        "shared": list(),
        "detdata": [self.pixels, self.det_data],
        "intervals": list(),
    }
    if self.weights is not None:
        req["detdata"].append(self.weights)
    if self.view is not None:
        req["intervals"].append(self.view)
    return req

_supports_accel()

Source code in toast/ops/scan_map/scan_map.py
210
211
def _supports_accel(self):
    return True

toast.ops.ScanMask

Bases: Operator

Operator which uses the pointing matrix to set timestream flags from a mask.

The mask must be a PixelData instance with an integer data type. The data for each pixel is bitwise-and combined with the mask_bits to form a result. For each detector sample crossing a pixel with a non-zero result, the detector flag is bitwise-or'd with the specified value.

Source code in toast/ops/scan_map/scan_map.py
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
@trait_docs
class ScanMask(Operator):
    """Operator which uses the pointing matrix to set timestream flags from a mask.

    The mask must be a PixelData instance with an integer data type.  The data for each
    pixel is bitwise-and combined with the mask_bits to form a result.  For each
    detector sample crossing a pixel with a non-zero result, the detector flag is
    bitwise-or'd with the specified value.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    det_mask = Int(
        defaults.det_mask_invalid,
        help="Bit mask value for per-detector flagging",
    )

    det_flags = Unicode(
        defaults.det_flags,
        allow_none=True,
        help="Observation detdata key for flags to use",
    )

    det_flags_value = Int(
        defaults.det_mask_processing,
        help="The detector flag value to set where the mask result is non-zero",
    )

    det_flag_mask = Int(
        defaults.det_mask_invalid,
        help="Bit mask value for detector sample flagging",
    )

    view = Unicode(
        None, allow_none=True, help="Use this view of the data in all observations"
    )

    pixels = Unicode("pixels", help="Observation detdata key for pixel indices")

    mask_key = Unicode(
        None,
        allow_none=True,
        help="The Data key where the mask is located",
    )

    mask_bits = Int(
        255, help="The number to bitwise-and with each mask value to form the result"
    )

    @traitlets.validate("det_mask")
    def _check_det_mask(self, proposal):
        # Bit masks must be non-negative integers.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det mask should be a positive integer")
        return check

    @traitlets.validate("det_flag_mask")
    def _check_det_flag_mask(self, proposal):
        # Bit masks must be non-negative integers.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det flag mask should be a positive integer")
        return check

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @function_timer
    def _exec(self, data, detectors=None, use_accel=None, **kwargs):
        """OR flag bits into detector flags wherever the mask is non-zero."""
        log = Logger.get()

        # Kernel selection
        implementation, use_accel = self.select_kernels(use_accel=use_accel)

        # Check that the detector data is set
        if self.det_flags is None:
            raise RuntimeError("You must set the det_flags trait before calling exec()")

        # Check that the mask is set
        if self.mask_key is None:
            raise RuntimeError("You must set the mask_key trait before calling exec()")
        if self.mask_key not in data:
            msg = "The mask_key '{}' does not exist in the data".format(self.mask_key)
            raise RuntimeError(msg)

        mask_data = data[self.mask_key]
        if not isinstance(mask_data, PixelData):
            raise RuntimeError("The mask to scan must be a PixelData instance")
        mask_dist = mask_data.distribution

        for ob in data.obs:
            # Get the detectors we are using for this observation
            dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
            if len(dets) == 0:
                # Nothing to do for this observation
                continue

            # If our output detector data does not yet exist, create it with a default
            # width of one byte per sample.
            if self.det_flags not in ob.detdata:
                ob.detdata.create(self.det_flags, dtype=np.uint8, detectors=dets)

            views = ob.view[self.view]
            for ivw, vw in enumerate(views):
                for det in dets:
                    # The pixels and flags.
                    pix = views.detdata[self.pixels][ivw][det]
                    dflags = views.detdata[self.det_flags][ivw][det]

                    # Get local submap and pixels
                    local_sm, local_pix = mask_dist.global_pixel_to_submap(pix)

                    # We could move this to compiled code if it is too slow...
                    masked = (mask_data[local_sm, local_pix, 0] & self.mask_bits) != 0
                    dflags[masked] |= self.det_flags_value

        return

    def _finalize(self, data, **kwargs):
        # No temporary products to clean up.
        return

    def _requires(self):
        # Inputs: the mask in the Data container, pixel indices and the
        # detector flags (plus the interval view if one is set).
        req = {
            "meta": list(),
            "global": [self.mask_key],
            "shared": list(),
            "detdata": [self.pixels, self.det_flags],
            "intervals": list(),
        }
        if self.view is not None:
            req["intervals"].append(self.view)
        return req

    def _provides(self):
        # Output: only the detector flag data.
        prov = {"meta": list(), "shared": list(), "detdata": [self.det_flags]}
        return prov

    def _supports_accel(self):
        return False

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

det_flag_mask = Int(defaults.det_mask_invalid, help='Bit mask value for detector sample flagging') class-attribute instance-attribute

det_flags = Unicode(defaults.det_flags, allow_none=True, help='Observation detdata key for flags to use') class-attribute instance-attribute

det_flags_value = Int(defaults.det_mask_processing, help='The detector flag value to set where the mask result is non-zero') class-attribute instance-attribute

det_mask = Int(defaults.det_mask_invalid, help='Bit mask value for per-detector flagging') class-attribute instance-attribute

mask_bits = Int(255, help='The number to bitwise-and with each mask value to form the result') class-attribute instance-attribute

mask_key = Unicode(None, allow_none=True, help='The Data key where the mask is located') class-attribute instance-attribute

pixels = Unicode('pixels', help='Observation detdata key for pixel indices') class-attribute instance-attribute

view = Unicode(None, allow_none=True, help='Use this view of the data in all observations') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/scan_map/scan_map.py
280
281
def __init__(self, **kwargs):
    # Trait handling is done entirely by the Operator base class.
    super().__init__(**kwargs)

_check_det_flag_mask(proposal)

Source code in toast/ops/scan_map/scan_map.py
273
274
275
276
277
278
@traitlets.validate("det_flag_mask")
def _check_det_flag_mask(self, proposal):
    """Validate that the proposed flag mask is a non-negative integer."""
    candidate = proposal["value"]
    if candidate < 0:
        raise traitlets.TraitError("Det flag mask should be a positive integer")
    return candidate

_check_det_mask(proposal)

Source code in toast/ops/scan_map/scan_map.py
266
267
268
269
270
271
@traitlets.validate("det_mask")
def _check_det_mask(self, proposal):
    """Validate that the proposed detector mask is a non-negative integer."""
    candidate = proposal["value"]
    if candidate < 0:
        raise traitlets.TraitError("Det mask should be a positive integer")
    return candidate

_exec(data, detectors=None, use_accel=None, **kwargs)

Source code in toast/ops/scan_map/scan_map.py
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
@function_timer
def _exec(self, data, detectors=None, use_accel=None, **kwargs):
    """OR flag bits into detector flags wherever the mask is non-zero."""
    log = Logger.get()

    # Kernel selection
    implementation, use_accel = self.select_kernels(use_accel=use_accel)

    # Check that the detector data is set
    if self.det_flags is None:
        raise RuntimeError("You must set the det_flags trait before calling exec()")

    # Check that the mask is set
    if self.mask_key is None:
        raise RuntimeError("You must set the mask_key trait before calling exec()")
    if self.mask_key not in data:
        msg = "The mask_key '{}' does not exist in the data".format(self.mask_key)
        raise RuntimeError(msg)

    mask_data = data[self.mask_key]
    if not isinstance(mask_data, PixelData):
        raise RuntimeError("The mask to scan must be a PixelData instance")
    mask_dist = mask_data.distribution

    for ob in data.obs:
        # Get the detectors we are using for this observation
        dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
        if len(dets) == 0:
            # Nothing to do for this observation
            continue

        # If our output detector data does not yet exist, create it with a default
        # width of one byte per sample.
        if self.det_flags not in ob.detdata:
            ob.detdata.create(self.det_flags, dtype=np.uint8, detectors=dets)

        views = ob.view[self.view]
        for ivw, vw in enumerate(views):
            for det in dets:
                # The pixels and flags.
                pix = views.detdata[self.pixels][ivw][det]
                dflags = views.detdata[self.det_flags][ivw][det]

                # Get local submap and pixels
                local_sm, local_pix = mask_dist.global_pixel_to_submap(pix)

                # We could move this to compiled code if it is too slow...
                masked = (mask_data[local_sm, local_pix, 0] & self.mask_bits) != 0
                dflags[masked] |= self.det_flags_value

    return

_finalize(data, **kwargs)

Source code in toast/ops/scan_map/scan_map.py
334
335
def _finalize(self, data, **kwargs):
    return

_provides()

Source code in toast/ops/scan_map/scan_map.py
349
350
351
def _provides(self):
    prov = {"meta": list(), "shared": list(), "detdata": [self.det_flags]}
    return prov

_requires()

Source code in toast/ops/scan_map/scan_map.py
337
338
339
340
341
342
343
344
345
346
347
def _requires(self):
    req = {
        "meta": list(),
        "global": [self.mask_key],
        "shared": list(),
        "detdata": [self.pixels, self.det_flags],
        "intervals": list(),
    }
    if self.view is not None:
        req["intervals"].append(self.view)
    return req

_supports_accel()

Source code in toast/ops/scan_map/scan_map.py
353
354
def _supports_accel(self):
    return False

toast.ops.ScanScale

Bases: Operator

Operator which uses the pointing matrix to apply pixel weights to timestreams.

The map must be a PixelData instance with either float32 or float64 values and one value per pixel. The timestream samples are multiplied by their corresponding pixel values.

Source code in toast/ops/scan_map/scan_map.py
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
@trait_docs
class ScanScale(Operator):
    """Operator which uses the pointing matrix to apply pixel weights to timestreams.

    The map must be a PixelData instance with either float32 or float64 values and
    one value per pixel.  The timestream samples are multiplied by their corresponding
    pixel values.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    det_mask = Int(
        defaults.det_mask_invalid,
        help="Bit mask value for per-detector flagging",
    )

    det_data = Unicode(
        None, allow_none=True, help="Observation detdata key for the timestream data"
    )

    det_flag_mask = Int(
        defaults.det_mask_invalid,
        help="Bit mask value for detector sample flagging",
    )

    view = Unicode(
        None, allow_none=True, help="Use this view of the data in all observations"
    )

    pixels = Unicode(defaults.pixels, help="Observation detdata key for pixel indices")

    # NOTE: _exec() below branches on `self.weights is None`, so this trait
    # must accept None for that branch to be reachable.
    weights = Unicode(
        defaults.weights,
        allow_none=True,
        help="Observation detdata key for Stokes weights",
    )

    map_key = Unicode(
        None,
        allow_none=True,
        help="The Data key where the weight map is located",
    )

    @traitlets.validate("det_mask")
    def _check_det_mask(self, proposal):
        # Bit masks must be non-negative integers.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det mask should be a positive integer")
        return check

    @traitlets.validate("det_flag_mask")
    def _check_det_flag_mask(self, proposal):
        # Bit masks must be non-negative integers.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det flag mask should be a positive integer")
        return check

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @function_timer
    def _exec(self, data, detectors=None, use_accel=None, **kwargs):
        """Multiply detector timestreams by the scanned weight-map values.

        Args:
            data (Data):  The distributed data container.
            detectors (list):  Optional list of detectors to process.
            use_accel (bool):  Whether to use an accelerator kernel.

        Raises:
            RuntimeError:  If `det_data` or `map_key` are unset, the map key
                is missing from the data, or the map is not a
                one-value-per-pixel PixelData instance.

        """
        log = Logger.get()

        # Kernel selection
        implementation, use_accel = self.select_kernels(use_accel=use_accel)

        # Check that the detector data is set
        if self.det_data is None:
            raise RuntimeError("You must set the det_data trait before calling exec()")

        # Check that the map is set
        if self.map_key is None:
            raise RuntimeError("You must set the map_key trait before calling exec()")
        if self.map_key not in data:
            msg = "The map_key '{}' does not exist in the data".format(self.map_key)
            raise RuntimeError(msg)

        map_data = data[self.map_key]
        if not isinstance(map_data, PixelData):
            raise RuntimeError("The map to scan must be a PixelData instance")
        if map_data.n_value != 1:
            raise RuntimeError("The map to scan must have one value per pixel")
        map_dist = map_data.distribution

        # The weight map is applied multiplicatively, so units are ignored.
        if map_data.units != u.dimensionless_unscaled:
            log.warning("Map for scaling is not unitless.  Ignoring units.")

        for ob in data.obs:
            # Get the detectors we are using for this observation
            dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
            if len(dets) == 0:
                # Nothing to do for this observation
                continue

            if self.det_data not in ob.detdata:
                msg = "detector data '{}' does not exist in observation {}".format(
                    self.det_data, ob.name
                )
                log.error(msg)
                raise RuntimeError(msg)

            intervals = ob.intervals[self.view].data
            det_data = ob.detdata[self.det_data].data
            det_data_indx = ob.detdata[self.det_data].indices(dets)
            pixels = ob.detdata[self.pixels].data
            pixels_indx = ob.detdata[self.pixels].indices(dets)
            if self.weights is None:
                # Use empty arrays, rather than None, so that we can pass that more
                # easily to compiled kernels that expect a buffer.
                weights = np.array([], dtype=np.float64)
                weight_indx = np.array([], dtype=np.int32)
            else:
                weights = ob.detdata[self.weights].data
                weight_indx = ob.detdata[self.weights].indices(dets)
            scan_map(
                map_dist.global_submap_to_local,
                map_dist.n_pix_submap,
                map_data.data,
                det_data,
                det_data_indx,
                pixels,
                pixels_indx,
                weights,
                weight_indx,
                intervals,
                1.0,
                False,
                False,
                True,
                impl=implementation,
                use_accel=use_accel,
            )

        return

    def _finalize(self, data, **kwargs):
        return

    def _requires(self):
        req = {
            "meta": list(),
            "global": [self.map_key],
            "shared": list(),
            "detdata": [self.pixels, self.weights, self.det_data],
            "intervals": list(),
        }
        if self.view is not None:
            req["intervals"].append(self.view)
        return req

    def _provides(self):
        prov = {"meta": list(), "shared": list(), "detdata": [self.det_data]}
        return prov

    def _supports_accel(self):
        return True

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

det_data = Unicode(None, allow_none=True, help='Observation detdata key for the timestream data') class-attribute instance-attribute

det_flag_mask = Int(defaults.det_mask_invalid, help='Bit mask value for detector sample flagging') class-attribute instance-attribute

det_mask = Int(defaults.det_mask_invalid, help='Bit mask value for per-detector flagging') class-attribute instance-attribute

map_key = Unicode(None, allow_none=True, help='The Data key where the weight map is located') class-attribute instance-attribute

pixels = Unicode(defaults.pixels, help='Observation detdata key for pixel indices') class-attribute instance-attribute

view = Unicode(None, allow_none=True, help='Use this view of the data in all observations') class-attribute instance-attribute

weights = Unicode(defaults.weights, help='Observation detdata key for Stokes weights') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/scan_map/scan_map.py
415
416
def __init__(self, **kwargs):
    # All configuration is handled via traits; just forward to the base class.
    super().__init__(**kwargs)

_check_det_flag_mask(proposal)

Source code in toast/ops/scan_map/scan_map.py
408
409
410
411
412
413
@traitlets.validate("det_flag_mask")
def _check_det_flag_mask(self, proposal):
    # Validator: the flag mask is a bit mask and must be non-negative.
    check = proposal["value"]
    if check < 0:
        raise traitlets.TraitError("Det flag mask should be a positive integer")
    return check

_check_det_mask(proposal)

Source code in toast/ops/scan_map/scan_map.py
401
402
403
404
405
406
@traitlets.validate("det_mask")
def _check_det_mask(self, proposal):
    # Validator: the detector mask is a bit mask and must be non-negative.
    check = proposal["value"]
    if check < 0:
        raise traitlets.TraitError("Det mask should be a positive integer")
    return check

_exec(data, detectors=None, use_accel=None, **kwargs)

Source code in toast/ops/scan_map/scan_map.py
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
@function_timer
def _exec(self, data, detectors=None, use_accel=None, **kwargs):
    """Scale detector timestreams by the values of a scanned weight map.

    For every observation, each detector's pixel indices are used to look up
    values in the map referenced by `self.map_key`, and the timestream
    samples are multiplied by those values via the `scan_map` kernel.

    Raises:
        RuntimeError:  If required traits are unset or the map is invalid.

    """
    log = Logger.get()

    # Kernel selection
    implementation, use_accel = self.select_kernels(use_accel=use_accel)

    # Check that the detector data is set
    if self.det_data is None:
        raise RuntimeError("You must set the det_data trait before calling exec()")

    # Check that the map is set
    if self.map_key is None:
        raise RuntimeError("You must set the map_key trait before calling exec()")
    if self.map_key not in data:
        msg = "The map_key '{}' does not exist in the data".format(self.map_key)
        raise RuntimeError(msg)

    map_data = data[self.map_key]
    if not isinstance(map_data, PixelData):
        raise RuntimeError("The map to scan must be a PixelData instance")
    if map_data.n_value != 1:
        raise RuntimeError("The map to scan must have one value per pixel")
    map_dist = map_data.distribution

    # The map is applied multiplicatively, so any units on it are ignored.
    if map_data.units != u.dimensionless_unscaled:
        log.warning("Map for scaling is not unitless.  Ignoring units.")

    for ob in data.obs:
        # Get the detectors we are using for this observation
        dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
        if len(dets) == 0:
            # Nothing to do for this observation
            continue

        if self.det_data not in ob.detdata:
            msg = "detector data '{}' does not exist in observation {}".format(
                self.det_data, ob.name
            )
            log.error(msg)
            raise RuntimeError(msg)

        intervals = ob.intervals[self.view].data
        det_data = ob.detdata[self.det_data].data
        det_data_indx = ob.detdata[self.det_data].indices(dets)
        pixels = ob.detdata[self.pixels].data
        pixels_indx = ob.detdata[self.pixels].indices(dets)
        if self.weights is None:
            # Use empty arrays, rather than None, so that we can pass that more
            # easily to compiled kernels that expect a buffer.
            weights = np.array([], dtype=np.float64)
            weight_indx = np.array([], dtype=np.int32)
        else:
            weights = ob.detdata[self.weights].data
            weight_indx = ob.detdata[self.weights].indices(dets)
        scan_map(
            map_dist.global_submap_to_local,
            map_dist.n_pix_submap,
            map_data.data,
            det_data,
            det_data_indx,
            pixels,
            pixels_indx,
            weights,
            weight_indx,
            intervals,
            1.0,
            False,
            False,
            True,
            impl=implementation,
            use_accel=use_accel,
        )

    return

_finalize(data, **kwargs)

Source code in toast/ops/scan_map/scan_map.py
494
495
def _finalize(self, data, **kwargs):
    return

_provides()

Source code in toast/ops/scan_map/scan_map.py
509
510
511
def _provides(self):
    prov = {"meta": list(), "shared": list(), "detdata": [self.det_data]}
    return prov

_requires()

Source code in toast/ops/scan_map/scan_map.py
497
498
499
500
501
502
503
504
505
506
507
def _requires(self):
    req = {
        "meta": list(),
        "global": [self.map_key],
        "shared": list(),
        "detdata": [self.pixels, self.weights, self.det_data],
        "intervals": list(),
    }
    if self.view is not None:
        req["intervals"].append(self.view)
    return req

_supports_accel()

Source code in toast/ops/scan_map/scan_map.py
513
514
def _supports_accel(self):
    # An accelerator (GPU) kernel implementation is available for this operator.
    return True

Point Sources

toast.ops.SimCatalog

Bases: Operator

Operator that generates variable and static point source signal.

Signal is generated by sampling the provided beam map at appropriate locations and scaling the resulting signal to match the perceived intensity of the source.

Source SED is convolved with the detector bandpass recorded in the focalplane table.

Example catalog entries:

.. highlight:: toml .. code-block:: toml

[example_static_source]
# Celestial coordinates are always given in degrees
ra_deg = 30
dec_deg = -30
# the SED can be specified using an arbitrary number of
# frequency bins.  The SED is interpolated in log-log space to
# convolve with the detector bandpass
# Use either `flux_density_mJy` or `flux_density_Jy` and adjust
# the values accordingly
freqs_ghz = [ 1.0, 1000.0,]
flux_density_mJy = [ 10.0, 1.0,]
# Omitting polarization fraction results in an
# unpolarized source
pol_frac = 0.1
pol_angle_deg = 0

[example_variable_source]
ra_deg = 30
dec_deg = -25
freqs_ghz = [ 1.0, 1000.0,]
# An arbitrary number of SED vectors can be provided but the
# location of the frequency bins is fixed.  Effective SED is
# interpolated between the specified epochs.
flux_density_Jy = [ [ 10.0, 1.0,], [ 30.0, 10.0,], [ 10.0, 1.0,],]
# Omitting the times_mjd entry results in a static source
times_mjd = [ 59000.0, 60000.0, 61000.0,]
# The polarization properties can also vary
pol_frac = [ 0.05, 0.15, 0.05,]
pol_angle_deg = [ 45, 45, 45,]

[example_transient_source]
ra_deg = 30
dec_deg = -20
freqs_ghz = [ 1.0, 1000.0,]
flux_density_Jy = [ [ 10.0, 1.0,], [ 30.0, 10.0,],]
# Difference between a variable and transient source is
# simply that the specified epochs do not cover the entire
# simulation time span.  The operator will not extrapolate
# outside the epochs.
times_mjd = [ 59410.0, 59411.0,]
Source code in toast/ops/sim_catalog.py
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
@trait_docs
class SimCatalog(Operator):
    """Operator that generates variable and static point source signal.

    Signal is generated by sampling the provided beam map at appropriate
    locations and scaling the resulting signal to match the perceived
    intensity of the source.

    Source SED is convolved with the detector bandpass recorded in the
    focalplane table.

    Example catalog entries:

    .. highlight:: toml
    .. code-block:: toml

        [example_static_source]
        # Celestial coordinates are always given in degrees
        ra_deg = 30
        dec_deg = -30
        # the SED can be specified using an arbitrary number of
        # frequency bins.  The SED is interpolated in log-log space to
        # convolve with the detector bandpass
        # Use either `flux_density_mJy` or `flux_density_Jy` and adjust
        # the values accordingly
        freqs_ghz = [ 1.0, 1000.0,]
        flux_density_mJy = [ 10.0, 1.0,]
        # Omitting polarization fraction results in an
        # unpolarized source
        pol_frac = 0.1
        pol_angle_deg = 0

        [example_variable_source]
        ra_deg = 30
        dec_deg = -25
        freqs_ghz = [ 1.0, 1000.0,]
        # An arbitrary number of SED vectors can be provided but the
        # location of the frequency bins is fixed.  Effective SED is
        # interpolated between the specified epochs.
        flux_density_Jy = [ [ 10.0, 1.0,], [ 30.0, 10.0,], [ 10.0, 1.0,],]
        # Omitting the times_mjd entry results in a static source
        times_mjd = [ 59000.0, 60000.0, 61000.0,]
        # The polarization properties can also vary
        pol_frac = [ 0.05, 0.15, 0.05,]
        pol_angle_deg = [ 45, 45, 45,]

        [example_transient_source]
        ra_deg = 30
        dec_deg = -20
        freqs_ghz = [ 1.0, 1000.0,]
        flux_density_Jy = [ [ 10.0, 1.0,], [ 30.0, 10.0,],]
        # Difference between a variable and transient source is
        # simply that the specified epochs do not cover the entire
        # simulation time span.  The operator will not extrapolate
        # outside the epochs.
        times_mjd = [ 59410.0, 59411.0,]
    """

    # Class traits

    # Internal API version, bumped when the operator interface changes.
    API = Int(0, help="Internal interface version for this operator")

    times = Unicode(
        defaults.times,
        help="Observation shared key for timestamps",
    )

    hwp_angle = Unicode(
        defaults.hwp_angle,
        help="Observation shared key for HWP angle",
    )

    # Path to the TOML source catalog; existence is checked by the
    # `catalog_file` validator below.
    catalog_file = Unicode(
        None,
        allow_none=True,
        help="Name of the TOML catalog file",
    )

    beam_file = Unicode(
        None,
        allow_none=True,
        help="HDF5 file that stores the simulated beam. "
        "If None, a symmetric Gaussian based on the instrument model will be used.",
    )

    det_data = Unicode(
        defaults.det_data,
        help="Observation detdata key for simulated signal",
    )

    det_mask = Int(
        defaults.det_mask_nonscience,
        help="Bit mask value for per-detector flagging",
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    # Operator producing detector-frame pointing; the validator below checks
    # that it exposes the traits this operator relies on.
    detector_pointing = Instance(
        klass=Operator,
        allow_none=True,
        help="Operator that translates boresight RA/Dec pointing into detector frame",
    )

    @traitlets.validate("det_mask")
    def _check_det_mask(self, proposal):
        # Validator: the detector mask is a bit mask and must be non-negative.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det mask should be a positive integer")
        return check

    @traitlets.validate("catalog_file")
    def _check_catalog_file(self, proposal):
        """Validator: the catalog file, when given, must exist on disk."""
        filename = proposal["value"]
        if filename is not None and not os.path.isfile(filename):
            # Include the offending path in the error message (previously the
            # f-string had no placeholder, so the path was never reported).
            raise traitlets.TraitError(f"Catalog file does not exist: {filename}")
        return filename

    @traitlets.validate("beam_file")
    def _check_beam_file(self, proposal):
        # Validator: the HDF5 beam file, when given, must exist on disk.
        beam_file = proposal["value"]
        if beam_file is not None and not os.path.isfile(beam_file):
            raise traitlets.TraitError(f"{beam_file} is not a valid beam file")
        return beam_file

    @traitlets.validate("detector_pointing")
    def _check_detector_pointing(self, proposal):
        # Validator: the pointing operator must be an Operator and expose the
        # traits this operator reads or manipulates when computing detector
        # positions.
        detpointing = proposal["value"]
        if detpointing is not None:
            if not isinstance(detpointing, Operator):
                raise traitlets.TraitError(
                    "detector_pointing should be an Operator instance"
                )
            # Check that this operator has the traits we expect
            for trt in [
                "view",
                "boresight",
                "shared_flags",
                "shared_flag_mask",
                "quats",
                "coord_in",
                "coord_out",
            ]:
                if not detpointing.has_trait(trt):
                    msg = f"detector_pointing operator should have a '{trt}' trait"
                    raise traitlets.TraitError(msg)
        return detpointing

    @function_timer
    def _load_catalog(self):
        """Load and validate the TOML source catalog.

        Parses `self.catalog_file` into `self.catalog`, verifies that every
        source defines the required position/frequency keys and exactly one
        flux density key, warns about unsupported keys, and attaches a unit
        vector for each source position for fast angular-distance tests.

        Raises:
            RuntimeError:  If a source entry is missing required keys or
                defines conflicting flux density keys.

        """
        log = Logger.get()
        # Load the TOML into a dictionary
        with open(self.catalog_file, "r") as f:
            self.catalog = tomlkit.parse(f.read())
        # Check that the necessary keys are defined for every source
        for source_name, source_dict in self.catalog.items():
            for key in ["ra_deg", "dec_deg", "freqs_ghz"]:
                if key not in source_dict:
                    msg = (
                        f"Catalog parsing error: '{source_name}' "
                        f"in '{self.catalog_file}' does not define '{key}'"
                    )
                    raise RuntimeError(msg)
            key1 = "flux_density_Jy"
            key2 = "flux_density_mJy"
            if key1 in source_dict and key2 in source_dict:
                msg = (
                    f"Catalog parsing error: '{source_name}' "
                    f"in '{self.catalog_file}' defines both "
                    f"'{key1}' and '{key2}'"
                )
                raise RuntimeError(msg)
            if key1 not in source_dict and key2 not in source_dict:
                msg = (
                    f"Catalog parsing error: '{source_name}' "
                    f"in '{self.catalog_file}' does not define "
                    f"'{key1}' or '{key2}'"
                )
                raise RuntimeError(msg)
        # Extra keys are allowed but produce warnings
        for source_name, source_dict in self.catalog.items():
            # BUGFIX: previously this tested the stale `key` variable left
            # over from the loop above instead of examining each source's own
            # keys, so unsupported keys were never reported.
            for key in source_dict:
                if key not in SUPPORTED_KEYS:
                    msg = (
                        f"WARNING: '{source_name}' entry to '{self.catalog_file}' "
                        f"contains an unsupported key: '{key}'"
                    )
                    log.warning(msg)
        # Translate each source position into a vector for rapid
        # distance calculations
        for source_name, source_dict in self.catalog.items():
            lon = source_dict["ra_deg"]
            lat = source_dict["dec_deg"]
            source_dict["vec"] = hp.dir2vec(lon, lat, lonlat=True).tolist()
        return

    def __init__(self, **kwargs):
        """Initialize the operator and its beam property cache."""
        super().__init__(**kwargs)
        # Cache of per-detector beam properties.  Eventually the operator
        # traits could list files per detector, per wafer, per tube, etc.
        # For now the same beam is used for all detectors, so this holds at
        # most one entry.
        self.beam_props = {}

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Simulate catalog source signal into every observation."""
        log = Logger.get()
        comm = data.comm

        # Both of these traits are mandatory; fail early with a clear message.
        for required in ("catalog_file", "detector_pointing"):
            if getattr(self, required) is None:
                raise RuntimeError(
                    f"You must set `{required}` before running SimCatalog"
                )

        self._load_catalog()

        for obs in data.obs:
            prefix = f"{comm.group} : {obs.name}"

            # Make sure detector data output exists.  If not, create it
            # with units of Kelvin.
            dets = obs.select_local_detectors(detectors, flagmask=self.det_mask)
            exists = obs.detdata.ensure(
                self.det_data, detectors=dets, create_units=self.det_data_units
            )
            det_units = obs.detdata[self.det_data].units
            # Conversion factor from Kelvin to the output units.
            scale = unit_conversion(u.K, det_units)

            self._observe_catalog(data, obs, prefix, dets, scale)

        return

    @function_timer
    def _get_beam_map(self, det, focalplane):
        """
        Construct a 2-dimensional interpolator for the beam.

        Returns a tuple of (interpolator, radius, solid angle), where the
        interpolator maps (x, y) offsets in radians to beam amplitude, the
        radius is the farthest offset with beam data, and the solid angle is
        integrated from the interpolated beam.
        """
        # Read in the simulated beam.  We could add operator traits to
        # specify whether to load different beams based on detector,
        # wafer, tube, etc and check that key here.
        log = Logger.get()
        if "ALL" in self.beam_props:
            # We have already read the single beam file.
            beam_dict = self.beam_props["ALL"]
        else:
            if self.beam_file is None:
                # Use the FWHM to generate a beam dictionary.
                # NOTE(review): this Gaussian fallback is rebuilt on every
                # call and deliberately not cached in beam_props, presumably
                # because the FWHM may differ between detectors — confirm.
                fwhm = focalplane[det]["fwhm"]
                sigma = fwhm * gaussian_fwhm_to_sigma
                w = 2 * fwhm
                n = 101  # Should be odd to include origin
                x = np.linspace(-w, w, n)
                y = np.linspace(-w, w, n)
                X, Y = np.meshgrid(x, y)
                model = np.exp(-(X**2 + Y**2) / (2 * sigma**2)).to_value()
                beam_dict = {
                    "data": model,
                    "size": 2 * w,
                    "npix": n,
                    "res": 2 * w / (n - 1),
                }
            else:
                # Load the beam from HDF5 once and cache it for all detectors.
                with h5py.File(self.beam_file, "r") as f:
                    beam_dict = {}
                    beam_dict["data"] = f["beam"][:]
                    beam_dict["size"] = f["beam"].attrs["size"] * u.degree
                    beam_dict["res"] = f["beam"].attrs["res"] * u.degree
                    beam_dict["npix"] = f["beam"].attrs["npix"]
                    self.beam_props["ALL"] = beam_dict

        # Normalize the beam to unit peak amplitude.
        model = beam_dict["data"].copy()
        model /= np.amax(model)

        # DEBUG begin
        # These commands add a tail to the beam that points towards the horizon
        # nx, ny = np.shape(model)
        # nhalf = nx // 2
        # w = 10
        # model[nhalf - w : nhalf + w + 1, 0 : nhalf] = 1
        # DEBUG end

        # Half-width of the beam map, in radians.
        w = beam_dict["size"].to_value(u.rad) / 2
        n = beam_dict["npix"]
        x = np.linspace(-w, w, n)
        beam = RectBivariateSpline(x, x, model)
        # Farthest distance (corner) where beam data is available
        r = np.sqrt(w**2 + w**2)

        # Measure the solid angle using the interpolator

        # Oversample by 10x relative to the native beam resolution.
        x = np.linspace(-w, w, 10 * n + 1)
        dx = (x[1] - x[0]) * u.rad
        beam_solid_angle = np.sum(beam(x, x)) * dx**2

        return beam, r, beam_solid_angle

    @function_timer
    def _observe_catalog(
        self,
        data,
        obs,
        prefix,
        dets,
        scale,
    ):
        """
        Observe the catalog with each detector in tod
        """
        log = Logger.get()

        # Get a view of the data which contains just this single
        # observation
        obs_data = data.select(obs_name=obs.name)
        focalplane = obs.telescope.focalplane

        times_mjd = to_MJD(obs.shared[self.times].data)
        if self.hwp_angle in obs.shared:
            hwp_angle = obs.shared[self.hwp_angle].data
        else:
            hwp_angle = None
        beam = None

        for idet, det in enumerate(dets):
            bandpass = obs.telescope.focalplane.bandpass
            signal = obs.detdata[self.det_data][det]

            self.detector_pointing.apply(obs_data, detectors=[det])
            det_quat = obs.detdata[self.detector_pointing.quats][det]

            # Convert RA/Dec quaternion of the detector into angles
            # `psi` includes the rotation to the detector polarization
            # sensitive direction

            det_theta, det_phi, det_psi = qa.to_iso_angles(det_quat)
            det_vec = hp.dir2vec(det_theta, det_phi).T.copy()
            try:
                det_psi_pol = focalplane[det]["pol_angle"]
            except KeyError:
                det_psi_pol = focalplane[det]["pol_ang"]
            # gamma angle is required when dealing with a HWP
            if hwp_angle is not None:
                det_gamma = focalplane[det]["gamma"]
            else:
                det_gamma = None

            # For now, we use the first detector's beam for all detectors.
            # Will be revisited when more refined beam products become available
            if beam is None or not "ALL" in self.beam_props:
                beam, beam_radius, beam_solid_angle = self._get_beam_map(
                    det, focalplane
                )
            dp_radius = np.cos(beam_radius)

            for source_name, source_dict in self.catalog.items():
                # Is this source close enough to register?
                dp = np.dot(det_vec, source_dict["vec"])
                hit = dp > dp_radius
                nhit = np.sum(hit)
                if nhit == 0:
                    continue

                # Get the appropriate source SED and convolve with the
                # detector bandpass
                if "times_mjd" in source_dict:
                    source_times = np.array(source_dict["times_mjd"])
                    ind = np.array(np.searchsorted(source_times, times_mjd))
                    # When time stamps fall outside the period covered by
                    # source time, we assume the source went quiet
                    good = np.logical_and(ind > 0, ind < len(source_times))
                    hit *= good
                    nhit = np.sum(hit)
                    if nhit == 0:
                        # This source is not active during our observation
                        continue
                    ind = ind[hit]
                    lengths = source_times[ind] - source_times[ind - 1]
                    right_weights = (source_times[ind] - times_mjd[hit]) / lengths
                    left_weights = 1 - right_weights
                    # useful shorthands
                    freq = np.array(source_dict["freqs_ghz"]) * u.GHz
                    if "flux_density_Jy" in source_dict:
                        seds = np.array(source_dict["flux_density_Jy"]) * u.Jy
                    elif "flux_density_mJy" in source_dict:
                        seds = np.array(source_dict["flux_density_mJy"]) * u.mJy
                    else:
                        msg = f"No flux density for {source_name}"
                        raise RuntimeError(msg)
                    # Mean SED used for bandpass convolution
                    wright = np.mean(right_weights)
                    wleft = 1 - wright
                    cindex = int(np.median(ind))
                    sed_mean = wleft * seds[cindex - 1] + wright * seds[cindex]
                    # Time-dependent amplitude to scale the mean SED
                    cfreq = bandpass.center_frequency(det, alpha=-1)
                    amplitudes = []
                    for sed in seds:
                        # Interpolate the SED to the detector central frequency
                        # in log-log domain where power-law spectra are
                        # linear
                        amp = np.exp(
                            np.interp(
                                np.log(cfreq.to_value(u.GHz)),
                                np.log(freq.to_value(u.GHz)),
                                np.log(sed.to_value(u.Jy)),
                            )
                        )
                        amplitudes.append(amp)
                    amplitudes = np.array(amplitudes)
                    # This is the time-dependent amplitude relative to
                    # sed_mean
                    amplitude = (
                        left_weights * amplitudes[ind - 1]
                        + right_weights * amplitudes[ind]
                    )
                    amplitude /= (
                        wleft * amplitudes[cindex - 1] + wright * amplitudes[cindex]
                    )
                    if "pol_frac" in source_dict:
                        pol_fracs = np.array(source_dict["pol_frac"])
                        pol_frac = (
                            left_weights * pol_fracs[ind - 1]
                            + right_weights * pol_fracs[ind]
                        )
                        pol_angles = np.unwrap(np.radians(source_dict["pol_angle_deg"]))
                        pol_angle = np.array(
                            left_weights * pol_angles[ind - 1]
                            + right_weights * pol_angles[ind]
                        )
                    else:
                        pol_frac = None
                        pol_angle = None
                else:
                    freq = np.array(source_dict["freqs_ghz"]) * u.GHz
                    if "flux_density_Jy" in source_dict:
                        sed_mean = np.array(source_dict["flux_density_Jy"]) * u.Jy
                    elif "flux_density_mJy" in source_dict:
                        sed_mean = np.array(source_dict["flux_density_mJy"]) * u.mJy
                    else:
                        msg = f"No flux density for {source_name}"
                        raise RuntimeError(msg)
                    if "pol_frac" in source_dict:
                        pol_frac = np.array(source_dict["pol_frac"])
                        pol_angle = np.radians(source_dict["pol_angle_deg"])
                    else:
                        pol_frac = None
                        pol_angle = None
                    amplitude = 1

                # Convolve the SED with the detector bandpass
                flux_density = bandpass.convolve(
                    det,
                    freq,
                    sed_mean.to_value(u.Jy),
                )

                # Convert the flux density to peak temperature
                temperature = (
                    flux_density
                    / beam_solid_angle.to_value(u.rad**2)
                    / bandpass.kcmb2jysr(det)
                )

                # Modulate the temperature in time
                temperature = temperature * amplitude

                # modulate temperature by polarization
                if pol_frac is not None:
                    Q = temperature * pol_frac * np.cos(2 * pol_angle)
                    U = temperature * pol_frac * np.sin(2 * pol_angle)
                    psi = det_psi[hit]
                    if hwp_angle is not None:
                        psi = 2 * (det_gamma.to_value(u.rad) - hwp_angle[hit]) - psi
                        # COSMO convention, note the sign for U
                        temperature += Q * np.cos(2 * psi) - U * np.sin(2 * psi)
                    else:
                        # COSMO convention, note the sign for U
                        temperature += Q * np.cos(2 * psi) + U * np.sin(2 * psi)

                # Interpolate the beam map at appropriate locations
                source_theta = np.radians(90 - source_dict["dec_deg"])
                source_phi = np.radians(source_dict["ra_deg"])
                phi_diff = (det_phi[hit] - source_phi + np.pi) % (2 * np.pi) - np.pi
                x = phi_diff * np.cos(np.pi / 2 - det_theta[hit])
                y = det_theta[hit] - source_theta
                # Rotate into the beam frame
                psi = det_psi[hit] - det_psi_pol.to_value(u.rad)
                x_beam = np.cos(psi) * x - np.sin(psi) * y
                y_beam = np.sin(psi) * x + np.cos(psi) * y
                sig = beam(x_beam, y_beam, grid=False) * temperature
                signal[hit] += scale * sig

        return

    def _finalize(self, data, **kwargs):
        # No per-run cleanup or reduction is needed by this operator.
        return

    def _requires(self):
        req = {
            "shared": [
                self.times,
            ],
        }
        return req

    def _provides(self):
        return {
            "detdata": [
                self.det_data,
            ]
        }

    def _accelerators(self):
        return list()

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

beam_file = Unicode(None, allow_none=True, help='HDF5 file that stores the simulated beam. If None, a symmetric Gaussian based on the instrument model will be used.') class-attribute instance-attribute

beam_props = dict() instance-attribute

catalog_file = Unicode(None, allow_none=True, help='Name of the TOML catalog file') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, help='Observation detdata key for simulated signal') class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

det_mask = Int(defaults.det_mask_nonscience, help='Bit mask value for per-detector flagging') class-attribute instance-attribute

detector_pointing = Instance(klass=Operator, allow_none=True, help='Operator that translates boresight RA/Dec pointing into detector frame') class-attribute instance-attribute

hwp_angle = Unicode(defaults.hwp_angle, help='Observation shared key for HWP angle') class-attribute instance-attribute

times = Unicode(defaults.times, help='Observation shared key for timestamps') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/sim_catalog.py
238
239
240
241
242
243
244
def __init__(self, **kwargs):
    """Forward all trait values to the Operator base class and reset the beam cache."""
    super().__init__(**kwargs)
    # Store of per-detector beam properties.  Eventually we could modify the
    # operator traits to list files per detector, per wafer, per tube, etc.
    # For now, we use the same beam for all detectors, so this will have only
    # one entry.
    self.beam_props = dict()

_accelerators()

Source code in toast/ops/sim_catalog.py
561
562
def _accelerators(self):
    return list()

_check_beam_file(proposal)

Source code in toast/ops/sim_catalog.py
161
162
163
164
165
166
@traitlets.validate("beam_file")
def _check_beam_file(self, proposal):
    """Trait validator: beam_file must be None or an existing file path."""
    candidate = proposal["value"]
    if candidate is None:
        return candidate
    if not os.path.isfile(candidate):
        raise traitlets.TraitError(f"{candidate} is not a valid beam file")
    return candidate

_check_catalog_file(proposal)

Source code in toast/ops/sim_catalog.py
154
155
156
157
158
159
@traitlets.validate("catalog_file")
def _check_catalog_file(self, proposal):
    """Trait validator: catalog_file must be None or an existing file.

    Raises:
        traitlets.TraitError: if the proposed path does not exist.
    """
    filename = proposal["value"]
    if filename is not None and not os.path.isfile(filename):
        # Include the offending path in the message; previously the
        # message contained a literal "(unknown)" placeholder and never
        # reported which file was missing.
        raise traitlets.TraitError(f"Catalog file does not exist: {filename}")
    return filename

_check_det_mask(proposal)

Source code in toast/ops/sim_catalog.py
147
148
149
150
151
152
@traitlets.validate("det_mask")
def _check_det_mask(self, proposal):
    """Trait validator: reject negative detector mask values."""
    value = proposal["value"]
    if value < 0:
        raise traitlets.TraitError("Det mask should be a positive integer")
    return value

_check_detector_pointing(proposal)

Source code in toast/ops/sim_catalog.py
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
@traitlets.validate("detector_pointing")
def _check_detector_pointing(self, proposal):
    """Trait validator: detector_pointing must be an Operator with the expected traits."""
    detpointing = proposal["value"]
    if detpointing is None:
        return detpointing
    if not isinstance(detpointing, Operator):
        raise traitlets.TraitError(
            "detector_pointing should be an Operator instance"
        )
    # The pointing operator must expose this set of traits so that this
    # operator can configure and reuse it.
    expected = (
        "view",
        "boresight",
        "shared_flags",
        "shared_flag_mask",
        "quats",
        "coord_in",
        "coord_out",
    )
    for trt in expected:
        if not detpointing.has_trait(trt):
            msg = f"detector_pointing operator should have a '{trt}' trait"
            raise traitlets.TraitError(msg)
    return detpointing

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/sim_catalog.py
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Simulate and accumulate catalog signal for each observation."""
    log = Logger.get()
    comm = data.comm

    # Both of these traits must be configured before execution.
    for required in ("catalog_file", "detector_pointing"):
        if getattr(self, required) is None:
            raise RuntimeError(
                f"You must set `{required}` before running SimCatalog"
            )

    self._load_catalog()

    for ob in data.obs:
        tag = f"{comm.group} : {ob.name}"

        # Make sure detector data output exists.  If not, create it
        # with units of Kelvin.
        local_dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
        ob.detdata.ensure(
            self.det_data, detectors=local_dets, create_units=self.det_data_units
        )
        out_units = ob.detdata[self.det_data].units
        # Conversion factor from Kelvin to whatever units the output uses.
        to_out = unit_conversion(u.K, out_units)

        self._observe_catalog(
            data,
            ob,
            tag,
            local_dets,
            to_out,
        )

    return

_finalize(data, **kwargs)

Source code in toast/ops/sim_catalog.py
543
544
def _finalize(self, data, **kwargs):
    return

_get_beam_map(det, focalplane)

Construct a 2-dimensional interpolator for the beam

Source code in toast/ops/sim_catalog.py
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
@function_timer
def _get_beam_map(self, det, focalplane):
    """
    Construct a 2-dimensional interpolator for the beam

    Args:
        det:  Detector name used to look up focalplane properties.
        focalplane:  Focalplane instance providing per-detector metadata.

    Returns:
        (beam, r, beam_solid_angle):  the RectBivariateSpline interpolator,
        the corner radius (in radians) out to which beam data exists, and
        the beam solid angle measured from the interpolator.
    """
    # Read in the simulated beam.  We could add operator traits to
    # specify whether to load different beams based on detector,
    # wafer, tube, etc and check that key here.
    log = Logger.get()
    if "ALL" in self.beam_props:
        # We have already read the single beam file.
        beam_dict = self.beam_props["ALL"]
    else:
        if self.beam_file is None:
            # Use the FWHM to generate a beam dictionary
            # NOTE(review): fwhm is presumably an astropy Quantity (the
            # .to_value() call below relies on it) -- confirm with the
            # focalplane schema.
            fwhm = focalplane[det]["fwhm"]
            sigma = fwhm * gaussian_fwhm_to_sigma
            w = 2 * fwhm
            n = 101  # Should be odd to include origin
            x = np.linspace(-w, w, n)
            y = np.linspace(-w, w, n)
            X, Y = np.meshgrid(x, y)
            model = np.exp(-(X**2 + Y**2) / (2 * sigma**2)).to_value()
            beam_dict = {
                "data": model,
                "size": 2 * w,
                "npix": n,
                "res": 2 * w / (n - 1),
            }
        else:
            with h5py.File(self.beam_file, "r") as f:
                beam_dict = {}
                beam_dict["data"] = f["beam"][:]
                beam_dict["size"] = f["beam"].attrs["size"] * u.degree
                beam_dict["res"] = f["beam"].attrs["res"] * u.degree
                beam_dict["npix"] = f["beam"].attrs["npix"]
                # Only the file-based beam is cached; the Gaussian fallback
                # is rebuilt per detector since the FWHM may differ.
                self.beam_props["ALL"] = beam_dict

    # Normalize the beam model to unit peak.
    model = beam_dict["data"].copy()
    model /= np.amax(model)

    # DEBUG begin
    # These commands add a tail to the beam that points towards the horizon
    # nx, ny = np.shape(model)
    # nhalf = nx // 2
    # w = 10
    # model[nhalf - w : nhalf + w + 1, 0 : nhalf] = 1
    # DEBUG end

    # Half-width of the beam map in radians.
    w = beam_dict["size"].to_value(u.rad) / 2
    n = beam_dict["npix"]
    x = np.linspace(-w, w, n)
    beam = RectBivariateSpline(x, x, model)
    # Farthest distance (corner) where beam data is available
    r = np.sqrt(w**2 + w**2)

    # Measure the solid angle using the interpolator

    # Oversample by 10x for the solid-angle quadrature.
    x = np.linspace(-w, w, 10 * n + 1)
    dx = (x[1] - x[0]) * u.rad
    beam_solid_angle = np.sum(beam(x, x)) * dx**2

    return beam, r, beam_solid_angle

_load_catalog()

Source code in toast/ops/sim_catalog.py
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
@function_timer
def _load_catalog(self):
    """Load and validate the TOML source catalog.

    Populates ``self.catalog`` and adds a unit ``vec`` pointing vector
    to every source entry for fast angular-distance tests.

    Raises:
        RuntimeError: if a source is missing a required key, or if it
            defines both (or neither) of the flux density keys.
    """
    log = Logger.get()
    # Load the TOML into a dictionary
    with open(self.catalog_file, "r") as f:
        self.catalog = tomlkit.parse(f.read())
    # Check that the necessary keys are defined for every source
    for source_name, source_dict in self.catalog.items():
        for key in ["ra_deg", "dec_deg", "freqs_ghz"]:
            if key not in source_dict:
                msg = (
                    f"Catalog parsing error: '{source_name}' "
                    f"in '{self.catalog_file}' does not define '{key}'"
                )
                raise RuntimeError(msg)
        key1 = "flux_density_Jy"
        key2 = "flux_density_mJy"
        if key1 in source_dict and key2 in source_dict:
            msg = (
                f"Catalog parsing error: '{source_name}' "
                f"in '{self.catalog_file}' defines both "
                f"'{key1}' and '{key2}'"
            )
            raise RuntimeError(msg)
        if key1 not in source_dict and key2 not in source_dict:
            msg = (
                f"Catalog parsing error: '{source_name}' "
                f"in '{self.catalog_file}' does not define "
                f"'{key1}' or '{key2}'"
            )
            raise RuntimeError(msg)
    # Extra keys are allowed but produce warnings.
    # BUG FIX: the previous code tested the stale `key` variable left over
    # from the validation loop above instead of iterating over each
    # source's own keys, so unsupported keys were never reported.
    for source_name, source_dict in self.catalog.items():
        for key in source_dict:
            if key not in SUPPORTED_KEYS:
                msg = (
                    f"WARNING: '{source_name}' entry to '{self.catalog_file}' "
                    f"contains an unsupported key: '{key}'"
                )
                log.warning(msg)
    # Translate each source position into a vector for rapid
    # distance calculations
    for source_name, source_dict in self.catalog.items():
        lon = source_dict["ra_deg"]
        lat = source_dict["dec_deg"]
        source_dict["vec"] = hp.dir2vec(lon, lat, lonlat=True).tolist()
    return

_observe_catalog(data, obs, prefix, dets, scale)

Observe the catalog with each detector in tod

Source code in toast/ops/sim_catalog.py
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
@function_timer
def _observe_catalog(
    self,
    data,
    obs,
    prefix,
    dets,
    scale,
):
    """
    Observe the catalog with each detector in tod

    Args:
        data:  The full Data container (used to select this observation).
        obs:  The observation being processed.
        prefix:  Log prefix string identifying the group and observation.
        dets:  List of local detector names to simulate.
        scale:  Conversion factor from Kelvin to the output detdata units.
    """
    log = Logger.get()

    # Get a view of the data which contains just this single
    # observation
    obs_data = data.select(obs_name=obs.name)
    focalplane = obs.telescope.focalplane

    times_mjd = to_MJD(obs.shared[self.times].data)
    if self.hwp_angle in obs.shared:
        hwp_angle = obs.shared[self.hwp_angle].data
    else:
        hwp_angle = None
    beam = None

    for idet, det in enumerate(dets):
        bandpass = obs.telescope.focalplane.bandpass
        signal = obs.detdata[self.det_data][det]

        self.detector_pointing.apply(obs_data, detectors=[det])
        det_quat = obs.detdata[self.detector_pointing.quats][det]

        # Convert RA/Dec quaternion of the detector into angles
        # `psi` includes the rotation to the detector polarization
        # sensitive direction

        det_theta, det_phi, det_psi = qa.to_iso_angles(det_quat)
        det_vec = hp.dir2vec(det_theta, det_phi).T.copy()
        # Older focalplanes name the polarization angle column differently.
        try:
            det_psi_pol = focalplane[det]["pol_angle"]
        except KeyError:
            det_psi_pol = focalplane[det]["pol_ang"]
        # gamma angle is required when dealing with a HWP
        if hwp_angle is not None:
            det_gamma = focalplane[det]["gamma"]
        else:
            det_gamma = None

        # For now, we use the first detector's beam for all detectors.
        # Will be revisited when more refined beam products become available
        if beam is None or not "ALL" in self.beam_props:
            beam, beam_radius, beam_solid_angle = self._get_beam_map(
                det, focalplane
            )
        # Dot-product threshold equivalent to the beam corner radius.
        dp_radius = np.cos(beam_radius)

        for source_name, source_dict in self.catalog.items():
            # Is this source close enough to register?
            dp = np.dot(det_vec, source_dict["vec"])
            hit = dp > dp_radius
            nhit = np.sum(hit)
            if nhit == 0:
                continue

            # Get the appropriate source SED and convolve with the
            # detector bandpass
            if "times_mjd" in source_dict:
                # Variable source: interpolate the tabulated SEDs in time.
                source_times = np.array(source_dict["times_mjd"])
                ind = np.array(np.searchsorted(source_times, times_mjd))
                # When time stamps fall outside the period covered by
                # source time, we assume the source went quiet
                good = np.logical_and(ind > 0, ind < len(source_times))
                hit *= good
                nhit = np.sum(hit)
                if nhit == 0:
                    # This source is not active during our observation
                    continue
                ind = ind[hit]
                lengths = source_times[ind] - source_times[ind - 1]
                right_weights = (source_times[ind] - times_mjd[hit]) / lengths
                left_weights = 1 - right_weights
                # useful shorthands
                freq = np.array(source_dict["freqs_ghz"]) * u.GHz
                if "flux_density_Jy" in source_dict:
                    seds = np.array(source_dict["flux_density_Jy"]) * u.Jy
                elif "flux_density_mJy" in source_dict:
                    seds = np.array(source_dict["flux_density_mJy"]) * u.mJy
                else:
                    msg = f"No flux density for {source_name}"
                    raise RuntimeError(msg)
                # Mean SED used for bandpass convolution
                wright = np.mean(right_weights)
                wleft = 1 - wright
                cindex = int(np.median(ind))
                sed_mean = wleft * seds[cindex - 1] + wright * seds[cindex]
                # Time-dependent amplitude to scale the mean SED
                cfreq = bandpass.center_frequency(det, alpha=-1)
                amplitudes = []
                for sed in seds:
                    # Interpolate the SED to the detector central frequency
                    # in log-log domain where power-law spectra are
                    # linear
                    amp = np.exp(
                        np.interp(
                            np.log(cfreq.to_value(u.GHz)),
                            np.log(freq.to_value(u.GHz)),
                            np.log(sed.to_value(u.Jy)),
                        )
                    )
                    amplitudes.append(amp)
                amplitudes = np.array(amplitudes)
                # This is the time-dependent amplitude relative to
                # sed_mean
                amplitude = (
                    left_weights * amplitudes[ind - 1]
                    + right_weights * amplitudes[ind]
                )
                amplitude /= (
                    wleft * amplitudes[cindex - 1] + wright * amplitudes[cindex]
                )
                if "pol_frac" in source_dict:
                    # Interpolate polarization fraction and (unwrapped)
                    # polarization angle to the sample times.
                    pol_fracs = np.array(source_dict["pol_frac"])
                    pol_frac = (
                        left_weights * pol_fracs[ind - 1]
                        + right_weights * pol_fracs[ind]
                    )
                    pol_angles = np.unwrap(np.radians(source_dict["pol_angle_deg"]))
                    pol_angle = np.array(
                        left_weights * pol_angles[ind - 1]
                        + right_weights * pol_angles[ind]
                    )
                else:
                    pol_frac = None
                    pol_angle = None
            else:
                # Static source: a single SED and constant polarization.
                freq = np.array(source_dict["freqs_ghz"]) * u.GHz
                if "flux_density_Jy" in source_dict:
                    sed_mean = np.array(source_dict["flux_density_Jy"]) * u.Jy
                elif "flux_density_mJy" in source_dict:
                    sed_mean = np.array(source_dict["flux_density_mJy"]) * u.mJy
                else:
                    msg = f"No flux density for {source_name}"
                    raise RuntimeError(msg)
                if "pol_frac" in source_dict:
                    pol_frac = np.array(source_dict["pol_frac"])
                    pol_angle = np.radians(source_dict["pol_angle_deg"])
                else:
                    pol_frac = None
                    pol_angle = None
                amplitude = 1

            # Convolve the SED with the detector bandpass
            flux_density = bandpass.convolve(
                det,
                freq,
                sed_mean.to_value(u.Jy),
            )

            # Convert the flux density to peak temperature
            temperature = (
                flux_density
                / beam_solid_angle.to_value(u.rad**2)
                / bandpass.kcmb2jysr(det)
            )

            # Modulate the temperature in time
            temperature = temperature * amplitude

            # modulate temperature by polarization
            if pol_frac is not None:
                Q = temperature * pol_frac * np.cos(2 * pol_angle)
                U = temperature * pol_frac * np.sin(2 * pol_angle)
                psi = det_psi[hit]
                if hwp_angle is not None:
                    psi = 2 * (det_gamma.to_value(u.rad) - hwp_angle[hit]) - psi
                    # COSMO convention, note the sign for U
                    temperature += Q * np.cos(2 * psi) - U * np.sin(2 * psi)
                else:
                    # COSMO convention, note the sign for U
                    temperature += Q * np.cos(2 * psi) + U * np.sin(2 * psi)

            # Interpolate the beam map at appropriate locations
            source_theta = np.radians(90 - source_dict["dec_deg"])
            source_phi = np.radians(source_dict["ra_deg"])
            # Wrap the longitude difference into [-pi, pi).
            phi_diff = (det_phi[hit] - source_phi + np.pi) % (2 * np.pi) - np.pi
            x = phi_diff * np.cos(np.pi / 2 - det_theta[hit])
            y = det_theta[hit] - source_theta
            # Rotate into the beam frame
            psi = det_psi[hit] - det_psi_pol.to_value(u.rad)
            x_beam = np.cos(psi) * x - np.sin(psi) * y
            y_beam = np.sin(psi) * x + np.cos(psi) * y
            sig = beam(x_beam, y_beam, grid=False) * temperature
            signal[hit] += scale * sig

    return

_provides()

Source code in toast/ops/sim_catalog.py
554
555
556
557
558
559
def _provides(self):
    return {
        "detdata": [
            self.det_data,
        ]
    }

_requires()

Source code in toast/ops/sim_catalog.py
546
547
548
549
550
551
552
def _requires(self):
    req = {
        "shared": [
            self.times,
        ],
    }
    return req

Terrestrial Signals

These operators generate detector signals from the Earth's atmosphere and from other sources of power outside a ground-based telescope.

toast.ops.WeatherModel

Bases: Operator

Create a default weather model

The weather model is used to draw observing conditions such as temperature, wind and PWV.

Source code in toast/ops/weather_model.py
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
@trait_docs
class WeatherModel(Operator):
    """Create a default weather model

    The weather model is used to draw observing conditions such as
    temperature, wind and PWV.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    times = Unicode(defaults.times, help="Observation shared key for timestamps")

    weather = Unicode(
        None,
        allow_none=True,
        help="Name of built-in weather site (e.g. 'atacama', 'south_pole') or path to HDF5 file",
    )

    realization = Int(0, help="The realization index")

    max_pwv = Quantity(
        None, allow_none=True, help="Maximum PWV for the simulated weather."
    )

    median_weather = Bool(
        False,
        help="Use median weather parameters instead of sampling from the distributions",
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Attach a simulated weather object to each observation's site."""
        log = Logger.get()

        for trait in ("weather",):
            if getattr(self, trait) is None:
                msg = f"You must set the '{trait}' trait before calling exec()"
                raise RuntimeError(msg)

        for ob in data.obs:
            comm = data.comm.comm_group
            site = ob.telescope.site
            times = ob.shared[self.times]
            tmin = times[0]
            tmax = times[-1]
            # Reduce the time range across the process group so every
            # process uses the same midpoint.
            if comm is not None:
                tmin = comm.allreduce(tmin, MPI.MIN)
                tmax = comm.allreduce(tmax, MPI.MAX)
            from datetime import datetime, timezone

            mid_time = datetime.fromtimestamp((tmin + tmax) / 2, timezone.utc)
            # First interpret `weather` as a built-in site name; if that
            # fails, fall back to treating it as a file path.
            try:
                weather = SimWeather(
                    time=mid_time,
                    name=self.weather,
                    site_uid=site.uid,
                    realization=self.realization,
                    max_pwv=self.max_pwv,
                    median_weather=self.median_weather,
                )
            except RuntimeError:
                # must be a file
                weather = SimWeather(
                    time=mid_time,
                    file=self.weather,
                    site_uid=site.uid,
                    realization=self.realization,
                    max_pwv=self.max_pwv,
                    median_weather=self.median_weather,
                )
            site.weather = weather

    def _finalize(self, data, **kwargs):
        return

    def _requires(self):
        # BUG FIX: previously the computed dictionary was discarded by a
        # bare `return`, so this method always returned None.
        req = {"shared": [self.times]}
        return req

    def _provides(self):
        prov = {"meta": []}
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

max_pwv = Quantity(None, allow_none=True, help='Maximum PWV for the simulated weather.') class-attribute instance-attribute

median_weather = Bool(False, help='Use median weather parameters instead of sampling from the distributions') class-attribute instance-attribute

realization = Int(0, help='The realization index') class-attribute instance-attribute

times = Unicode(defaults.times, help='Observation shared key for timestamps') class-attribute instance-attribute

weather = Unicode(None, allow_none=True, help="Name of built-in weather site (e.g. 'atacama', 'south_pole') or path to HDF5 file") class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/weather_model.py
55
56
def __init__(self, **kwargs):
    """Pass all trait values through to the Operator base class."""
    super().__init__(**kwargs)

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/weather_model.py
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Attach a simulated weather object to each observation's site."""
    log = Logger.get()

    # The weather trait is mandatory.
    for trait in ("weather",):
        if getattr(self, trait) is None:
            msg = f"You must set the '{trait}' trait before calling exec()"
            raise RuntimeError(msg)

    for ob in data.obs:
        comm = data.comm.comm_group
        site = ob.telescope.site
        times = ob.shared[self.times]
        tmin = times[0]
        tmax = times[-1]
        # Reduce the time range across the process group so every process
        # computes the same observation midpoint.
        if comm is not None:
            tmin = comm.allreduce(tmin, MPI.MIN)
            tmax = comm.allreduce(tmax, MPI.MAX)
        from datetime import datetime, timezone

        mid_time = datetime.fromtimestamp((tmin + tmax) / 2, timezone.utc)
        # First interpret `weather` as a built-in site name; if that raises,
        # fall back to treating it as a file path.
        try:
            weather = SimWeather(
                time=mid_time,
                name=self.weather,
                site_uid=site.uid,
                realization=self.realization,
                max_pwv=self.max_pwv,
                median_weather=self.median_weather,
            )
        except RuntimeError:
            # must be a file
            weather = SimWeather(
                time=mid_time,
                file=self.weather,
                site_uid=site.uid,
                realization=self.realization,
                max_pwv=self.max_pwv,
                median_weather=self.median_weather,
            )
        site.weather = weather

_finalize(data, **kwargs)

Source code in toast/ops/weather_model.py
100
101
def _finalize(self, data, **kwargs):
    return

_provides()

Source code in toast/ops/weather_model.py
107
108
109
def _provides(self):
    prov = {"meta": []}
    return prov

_requires()

Source code in toast/ops/weather_model.py
103
104
105
def _requires(self):
    req = {"shared": [self.times]}
    return

toast.ops.SimAtmosphere

Bases: Operator

Operator which generates atmosphere timestreams for detectors.

All processes collectively generate the atmospheric realization. Then each process passes through its local data and observes the atmosphere.

Source code in toast/ops/sim_tod_atm.py
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
@trait_docs
class SimAtmosphere(Operator):
    """Operator which generates atmosphere timestreams for detectors.

    All processes collectively generate the atmospheric realization. Then each process
    passes through its local data and observes the atmosphere.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    times = Unicode(defaults.times, help="Observation shared key for timestamps")

    det_data = Unicode(
        defaults.det_data,
        help="Observation detdata key for accumulating atmosphere timestreams",
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    view = Unicode(
        None, allow_none=True, help="Use this view of the data in all observations"
    )

    detector_pointing = Instance(
        klass=Operator,
        allow_none=True,
        help="Operator that translates boresight Az/El pointing into detector frame",
    )

    detector_weights = Instance(
        klass=Operator,
        allow_none=True,
        help="Operator that translates boresight Az/El pointing into detector weights",
    )

    polarization_fraction = Float(
        0,
        help="Polarization fraction (only Q polarization).",
    )

    shared_flags = Unicode(
        defaults.shared_flags,
        allow_none=True,
        help="Observation shared key for telescope flags to use",
    )

    shared_flag_mask = Int(
        defaults.shared_mask_invalid, help="Bit mask value for optional flagging"
    )

    det_mask = Int(
        defaults.det_mask_invalid,
        help="Bit mask value for per-detector flagging",
    )

    det_flags = Unicode(
        defaults.det_flags,
        allow_none=True,
        help="Observation detdata key for flags to use",
    )

    det_flag_mask = Int(
        defaults.det_mask_invalid, help="Bit mask value for detector sample flagging"
    )

    turnaround_interval = Unicode(
        "turnaround", allow_none=True, help="Interval name for turnarounds"
    )

    realization = Int(
        0, help="If simulating multiple realizations, the realization index"
    )

    component = Int(
        123456, help="The component index to use for this atmosphere simulation"
    )

    lmin_center = Quantity(
        0.01 * u.meter, help="Kolmogorov turbulence dissipation scale center"
    )

    lmin_sigma = Quantity(
        0.001 * u.meter, help="Kolmogorov turbulence dissipation scale sigma"
    )

    lmax_center = Quantity(
        10.0 * u.meter, help="Kolmogorov turbulence injection scale center"
    )

    lmax_sigma = Quantity(
        10.0 * u.meter, help="Kolmogorov turbulence injection scale sigma"
    )

    gain = Float(1e-5, help="Scaling applied to the simulated TOD")

    zatm = Quantity(40000.0 * u.meter, help="Atmosphere extent for temperature profile")

    zmax = Quantity(
        2000.0 * u.meter, help="Atmosphere extent for water vapor integration"
    )

    xstep = Quantity(100.0 * u.meter, help="Size of volume elements in X direction")

    ystep = Quantity(100.0 * u.meter, help="Size of volume elements in Y direction")

    zstep = Quantity(100.0 * u.meter, help="Size of volume elements in Z direction")

    z0_center = Quantity(
        2000.0 * u.meter, help="Central value of the water vapor distribution"
    )

    z0_sigma = Quantity(0.0 * u.meter, help="Sigma of the water vapor distribution")

    wind_dist = Quantity(
        3000.0 * u.meter,
        help="Maximum wind drift before discarding the volume and creating a new one",
    )

    fade_time = Quantity(
        60.0 * u.s,
        help="Fade in/out time to avoid a step at wind break.",
    )

    sample_rate = Quantity(
        None,
        allow_none=True,
        help="Rate at which to sample atmospheric TOD before interpolation.  "
        "Default is no interpolation.",
    )

    nelem_sim_max = Int(10000, help="Controls the size of the simulation slices")

    n_bandpass_freqs = Int(
        100,
        help="The number of sampling frequencies used when convolving the bandpass with atmosphere absorption and loading",
    )

    cache_dir = Unicode(
        None,
        allow_none=True,
        help="Directory to use for loading / saving atmosphere realizations",
    )

    overwrite_cache = Bool(
        False, help="If True, redo and overwrite any cached atmospheric realizations."
    )

    cache_only = Bool(
        False, help="If True, only cache the atmosphere, do not observe it."
    )

    debug_spectrum = Bool(False, help="If True, dump out Kolmogorov debug files")

    debug_tod = Bool(False, help="If True, dump TOD to pickle files")

    debug_snapshots = Bool(
        False, help="If True, dump snapshots of the atmosphere slabs to pickle files"
    )

    debug_plots = Bool(False, help="If True, make plots of the debug snapshots")

    add_loading = Bool(True, help="Add elevation-dependent loading.")

    field_of_view = Quantity(
        None,
        allow_none=True,
        help="Override the focalplane field of view",
    )

    corr_lim = Float(
        1e-3,
        help="Correlation limit is used to measure the correlation length of the "
        "simulation.  Elements further than correlation length apart have their "
        "covariance set to zero.",
    )

    @traitlets.validate("det_mask")
    def _check_det_mask(self, proposal):
        # Bit masks must be non-negative integers.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det mask should be a positive integer")
        return check

    @traitlets.validate("det_flag_mask")
    def _check_det_flag_mask(self, proposal):
        # Bit masks must be non-negative integers.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Flag mask should be a positive integer")
        return check

    @traitlets.validate("shared_flag_mask")
    def _check_shared_flag_mask(self, proposal):
        # Bit masks must be non-negative integers.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Flag mask should be a positive integer")
        return check

    @traitlets.validate("detector_pointing")
    def _check_detector_pointing(self, proposal):
        # The pointing operator must expose the traits that _exec accesses
        # (boresight, quats, view, flags, coordinate systems).
        detpointing = proposal["value"]
        if detpointing is not None:
            if not isinstance(detpointing, Operator):
                raise traitlets.TraitError(
                    "detector_pointing should be an Operator instance"
                )
            # Check that this operator has the traits we expect
            for trt in [
                "view",
                "boresight",
                "shared_flags",
                "shared_flag_mask",
                "quats",
                "coord_in",
                "coord_out",
            ]:
                if not detpointing.has_trait(trt):
                    msg = f"detector_pointing operator should have a '{trt}' trait"
                    raise traitlets.TraitError(msg)
        return detpointing

    @traitlets.validate("detector_weights")
    def _check_detector_weights(self, proposal):
        # The weights operator must expose the traits copied onto the
        # ObserveAtmosphere operator in _exec (weights, mode).
        detweights = proposal["value"]
        if detweights is not None:
            if not isinstance(detweights, Operator):
                raise traitlets.TraitError(
                    "detector_weights should be an Operator instance"
                )
            # Check that this operator has the traits we expect
            for trt in [
                "view",
                "weights",
                "mode",
            ]:
                if not detweights.has_trait(trt):
                    msg = f"detector_weights operator should have a '{trt}' trait"
                    raise traitlets.TraitError(msg)
        return detweights

    def __init__(self, **kwargs):
        """Construct the operator and verify the compiled dependencies exist."""
        super().__init__(**kwargs)
        # Fail fast if TOAST was built without the compiled libraries that the
        # atmosphere simulation requires.
        if not available_utils:
            log = Logger.get()
            msg = "TOAST was compiled without the libaatm library, which is "
            msg += "required for observations of simulated atmosphere."
            log.error(msg)
            raise RuntimeError(msg)
        if not available_atm:
            log = Logger.get()
            msg = "TOAST was compiled without the SuiteSparse package, which is "
            msg += "required for observations of simulated atmosphere."
            log.error(msg)
            raise RuntimeError(msg)

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Generate atmosphere realizations and observe them into detector data.

        Args:
            data (Data):  The distributed data container.
            detectors (list):  Optional list of detectors to process.

        Raises:
            RuntimeError:  If detector_pointing is not set or an observation
                has no name.

        """
        log = Logger.get()

        if self.detector_pointing is None:
            raise RuntimeError("The detector_pointing trait must be set")

        # Since each process group has the same set of observations, we use the group
        # communicator for collectively simulating the atmosphere slab for each
        # observation.
        comm = data.comm.comm_group
        group = data.comm.group
        rank = data.comm.group_rank
        comm_node = data.comm.comm_group_node
        comm_node_rank = data.comm.comm_group_node_rank

        view = self.view
        if view is None:
            # Use the same data view as detector pointing
            view = self.detector_pointing.view

        # Name of the intervals for ranges valid for a given wind chunk
        wind_intervals = "wind"

        # A view that combines user input and wind breaks
        if self.view is None and self.detector_pointing.view is None:
            temporary_view = wind_intervals
        else:
            temporary_view = "temporary_view"

        # The atmosphere sims are created and stored for each observing session.
        # This data key contains a dictionary of sims, keyed on session name.
        atm_sim_key = "atm_sim"

        # Generate (or load) the atmosphere realizations for all sessions
        gen_atm = GenerateAtmosphere(
            times=self.times,
            boresight=self.detector_pointing.boresight,
            wind_intervals=wind_intervals,
            shared_flags=self.shared_flags,
            shared_flag_mask=self.shared_flag_mask,
            output=atm_sim_key,
            turnaround_interval=self.turnaround_interval,
            realization=self.realization,
            component=self.component,
            lmin_center=self.lmin_center,
            lmin_sigma=self.lmin_sigma,
            lmax_center=self.lmax_center,
            lmax_sigma=self.lmax_sigma,
            gain=self.gain,
            zatm=self.zatm,
            zmax=self.zmax,
            xstep=self.xstep,
            ystep=self.ystep,
            zstep=self.zstep,
            z0_center=self.z0_center,
            z0_sigma=self.z0_sigma,
            wind_dist=self.wind_dist,
            fade_time=self.fade_time,
            sample_rate=self.sample_rate,
            nelem_sim_max=self.nelem_sim_max,
            cache_dir=self.cache_dir,
            overwrite_cache=self.overwrite_cache,
            cache_only=self.cache_only,
            debug_spectrum=self.debug_spectrum,
            debug_snapshots=self.debug_snapshots,
            debug_plots=self.debug_plots,
            field_of_view=self.field_of_view,
            corr_lim=self.corr_lim,
        )
        gen_atm.apply(data)

        if self.cache_only:
            # In this case, the simulated slabs were written to disk but never stored
            # in the output data key.
            return

        # Observation key for storing absorption and loading
        absorption_key = f"{self.name}_absorption"
        if self.add_loading:
            loading_key = f"{self.name}_loading"
        else:
            loading_key = None

        # Set up the observing operator
        if self.shared_flags is None:
            # Cannot observe samples that have no pointing
            shared_flags = self.detector_pointing.shared_flags
            shared_flag_mask = self.detector_pointing.shared_flag_mask
        else:
            # Trust that the user has provided a flag that excludes samples
            # without pointing
            shared_flags = self.shared_flags
            shared_flag_mask = self.shared_flag_mask

        observe_atm = ObserveAtmosphere(
            times=self.times,
            det_data=self.det_data,
            quats_azel=self.detector_pointing.quats,
            view=temporary_view,
            shared_flags=shared_flags,
            shared_flag_mask=shared_flag_mask,
            det_mask=self.det_mask,
            det_flags=self.det_flags,
            det_flag_mask=self.det_flag_mask,
            det_data_units=self.det_data_units,
            wind_view=wind_intervals,
            fade_time=self.fade_time,
            sim=atm_sim_key,
            absorption=absorption_key,
            loading=loading_key,
            n_bandpass_freqs=self.n_bandpass_freqs,
            gain=self.gain,
            polarization_fraction=self.polarization_fraction,
            sample_rate=self.sample_rate,
            debug_tod=self.debug_tod,
        )
        if self.detector_weights is not None:
            observe_atm.weights_mode = self.detector_weights.mode
            observe_atm.weights = self.detector_weights.weights

        for iobs, ob in enumerate(data.obs):
            if ob.name is None:
                msg = "Atmosphere simulation requires each observation to have a name"
                raise RuntimeError(msg)

            # Get the detectors we are using for this observation
            dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
            if len(dets) == 0:
                # Nothing to do for this observation
                continue

            tmr = Timer()
            tmr.start()

            # Prefix for logging
            log_prefix = f"{group} : {ob.name} : "

            # Make sure detector data output exists
            exists = ob.detdata.ensure(
                self.det_data,
                detectors=dets,
                create_units=self.det_data_units,
            )

            # Check that our view is fully covered by detector pointing.  If the
            # detector_pointing view is None, then it has all samples.  If our own
            # view was None, then it would have been set to the detector_pointing
            # view above.
            if (view is not None) and (self.detector_pointing.view is not None):
                if ob.intervals[view] != ob.intervals[self.detector_pointing.view]:
                    # We need to check intersection
                    intervals = ob.intervals[self.view]
                    detector_intervals = ob.intervals[self.detector_pointing.view]
                    intersection = detector_intervals & intervals
                    if intersection != intervals:
                        msg = f"view {self.view} is not fully covered by valid "
                        msg += "detector pointing"
                        raise RuntimeError(msg)

            # Compute the absorption and loading for this observation
            self._common_absorption_and_loading(
                ob, dets, absorption_key, loading_key, comm
            )

            # Create temporary intervals by combining views
            if temporary_view != wind_intervals:
                ob.intervals[temporary_view] = (
                    ob.intervals[view] & ob.intervals[wind_intervals]
                )

            # Observation pipeline.  We do not want to store persistent detector
            # pointing, so we build a small pipeline that runs one detector at a
            # time on only the current observation.
            pipe_data = data.select(obs_index=iobs)

            operators = [self.detector_pointing]
            if self.detector_weights is not None:
                operators.append(self.detector_weights)
            operators.append(observe_atm)

            observe_pipe = Pipeline(operators=operators, detector_sets=["SINGLE"])
            observe_pipe.apply(pipe_data)

            # Delete the absorption and loading for this observation
            if absorption_key is not None:
                del ob[absorption_key]
            if loading_key is not None:
                del ob[loading_key]

            if comm is not None:
                comm.Barrier()
            if rank == 0:
                tmr.stop()
                log.debug(
                    f"{log_prefix}Simulate and observe atmosphere:  "
                    f"{tmr.seconds()} seconds"
                )

            if temporary_view != wind_intervals:
                del ob.intervals[temporary_view]
            del ob.intervals[wind_intervals]

        # Delete the atmosphere slabs for all sessions
        for sname in list(data[atm_sim_key].keys()):
            for wind_slabs in data[atm_sim_key][sname]:
                for slab in wind_slabs:
                    slab.close()
            del data[atm_sim_key][sname]
        del data[atm_sim_key]

    @function_timer
    def _common_absorption_and_loading(
        self, obs, dets, absorption_key, loading_key, comm
    ):
        """Compute the (common) absorption and loading prior to bandpass convolution.

        The result is stored in the observation dictionary under the given keys,
        keyed on the frequency range of each detector group.

        Args:
            obs (Observation):  The observation to process.
            dets (list):  The detectors to consider.
            absorption_key (str):  Observation key for absorption, or None.
            loading_key (str):  Observation key for loading, or None.
            comm (MPI.Comm):  The group communicator, or None.

        Raises:
            RuntimeError:  If the focalplane has no bandpass defined.

        """
        if absorption_key is None and loading_key is None:
            # Nothing to do
            return

        if obs.telescope.focalplane.bandpass is None:
            raise RuntimeError("Focalplane does not define bandpass")
        altitude = obs.telescope.site.earthloc.height
        weather = obs.telescope.site.weather
        bandpass = obs.telescope.focalplane.bandpass

        # Only (re)generate values if any of the requested detectors are missing
        # from the cached results.
        generate_absorption = False
        if absorption_key is not None:
            if absorption_key in obs:
                for det in dets:
                    if det not in obs[absorption_key]:
                        generate_absorption = True
                        break
            else:
                generate_absorption = True

        generate_loading = False
        if loading_key is not None:
            if loading_key in obs:
                for det in dets:
                    if det not in obs[loading_key]:
                        generate_loading = True
                        break
            else:
                generate_loading = True

        if (not generate_loading) and (not generate_absorption):
            # Nothing to do for these detectors
            return

        if generate_loading:
            if loading_key in obs:
                # Delete stale data
                del obs[loading_key]
            obs[loading_key] = dict()

        if generate_absorption:
            if absorption_key in obs:
                # Delete stale data
                del obs[absorption_key]
            obs[absorption_key] = dict()

        # The focalplane likely has groups of detectors whose bandpass spans
        # the same frequency range.  First we build this grouping.

        freq_groups = dict()
        for det in dets:
            dfmin, dfmax = bandpass.get_range(det=det)
            fkey = f"{dfmin} {dfmax}"
            if fkey not in freq_groups:
                freq_groups[fkey] = list()
            freq_groups[fkey].append(det)

        # Work on each frequency group of detectors.  Collectively use the
        # processes in the group to do the calculation.

        for fkey, fdets in freq_groups.items():
            freq_min, freq_max = bandpass.get_range(det=fdets[0])
            n_freq = self.n_bandpass_freqs
            freqs = np.linspace(freq_min, freq_max, n_freq)
            # Distribute the frequency samples across the group communicator.
            if comm is None:
                ntask = 1
                my_rank = 0
            else:
                ntask = comm.size
                my_rank = comm.rank
            n_freq_task = int(np.ceil(n_freq / ntask))
            my_start = min(my_rank * n_freq_task, n_freq)
            my_stop = min(my_start + n_freq_task, n_freq)
            my_n_freq = my_stop - my_start

            absorption = list()
            loading = list()

            if my_n_freq > 0:
                if generate_absorption:
                    absorption = atm_absorption_coefficient_vec(
                        altitude.to_value(u.meter),
                        weather.air_temperature.to_value(u.Kelvin),
                        weather.surface_pressure.to_value(u.Pa),
                        weather.pwv.to_value(u.mm),
                        freqs[my_start].to_value(u.GHz),
                        freqs[my_stop - 1].to_value(u.GHz),
                        my_n_freq,
                    )
                if generate_loading:
                    loading = atm_atmospheric_loading_vec(
                        altitude.to_value(u.meter),
                        weather.air_temperature.to_value(u.Kelvin),
                        weather.surface_pressure.to_value(u.Pa),
                        weather.pwv.to_value(u.mm),
                        freqs[my_start].to_value(u.GHz),
                        freqs[my_stop - 1].to_value(u.GHz),
                        my_n_freq,
                    )

            # Concatenate the per-process pieces into the full frequency range.
            if comm is not None:
                if generate_absorption:
                    absorption = np.hstack(comm.allgather(absorption))
                if generate_loading:
                    loading = np.hstack(comm.allgather(loading))
            if generate_absorption:
                obs[absorption_key][fkey] = absorption
            if generate_loading:
                obs[loading_key][fkey] = loading

    def _finalize(self, data, **kwargs):
        """No finalization needed; all work happens in _exec."""
        return

    def _requires(self):
        """Return the data objects required by this operator.

        The original implementation referenced nonexistent traits
        (``self.boresight`` and ``self.detdata``), which raised AttributeError
        when called.  We require the timestamps, the boresight pointing used by
        the detector_pointing operator, any shared flags, and the optional view.
        """
        req = {
            "meta": list(),
            "shared": [
                self.times,
            ],
            "detdata": list(),
            "intervals": list(),
        }
        if self.detector_pointing is not None:
            req["shared"].append(self.detector_pointing.boresight)
        if self.shared_flags is not None:
            req["shared"].append(self.shared_flags)
        if self.view is not None:
            req["intervals"].append(self.view)
        return req

    def _provides(self):
        """Return the data objects created by this operator (the output detdata)."""
        prov = {
            "meta": list(),
            "shared": list(),
            "detdata": [
                self.det_data,
            ],
        }
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

add_loading = Bool(True, help='Add elevation-dependent loading.') class-attribute instance-attribute

cache_dir = Unicode(None, allow_none=True, help='Directory to use for loading / saving atmosphere realizations') class-attribute instance-attribute

cache_only = Bool(False, help='If True, only cache the atmosphere, do not observe it.') class-attribute instance-attribute

component = Int(123456, help='The component index to use for this atmosphere simulation') class-attribute instance-attribute

corr_lim = Float(0.001, help='Correlation limit is used to measure the correlation length of the simulation. Elements further than correlation length apart have their covariance set to zero.') class-attribute instance-attribute

debug_plots = Bool(False, help='If True, make plots of the debug snapshots') class-attribute instance-attribute

debug_snapshots = Bool(False, help='If True, dump snapshots of the atmosphere slabs to pickle files') class-attribute instance-attribute

debug_spectrum = Bool(False, help='If True, dump out Kolmogorov debug files') class-attribute instance-attribute

debug_tod = Bool(False, help='If True, dump TOD to pickle files') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, help='Observation detdata key for accumulating atmosphere timestreams') class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

det_flag_mask = Int(defaults.det_mask_invalid, help='Bit mask value for detector sample flagging') class-attribute instance-attribute

det_flags = Unicode(defaults.det_flags, allow_none=True, help='Observation detdata key for flags to use') class-attribute instance-attribute

det_mask = Int(defaults.det_mask_invalid, help='Bit mask value for per-detector flagging') class-attribute instance-attribute

detector_pointing = Instance(klass=Operator, allow_none=True, help='Operator that translates boresight Az/El pointing into detector frame') class-attribute instance-attribute

detector_weights = Instance(klass=Operator, allow_none=True, help='Operator that translates boresight Az/El pointing into detector weights') class-attribute instance-attribute

fade_time = Quantity(60.0 * u.s, help='Fade in/out time to avoid a step at wind break.') class-attribute instance-attribute

field_of_view = Quantity(None, allow_none=True, help='Override the focalplane field of view') class-attribute instance-attribute

gain = Float(1e-05, help='Scaling applied to the simulated TOD') class-attribute instance-attribute

lmax_center = Quantity(10.0 * u.meter, help='Kolmogorov turbulence injection scale center') class-attribute instance-attribute

lmax_sigma = Quantity(10.0 * u.meter, help='Kolmogorov turbulence injection scale sigma') class-attribute instance-attribute

lmin_center = Quantity(0.01 * u.meter, help='Kolmogorov turbulence dissipation scale center') class-attribute instance-attribute

lmin_sigma = Quantity(0.001 * u.meter, help='Kolmogorov turbulence dissipation scale sigma') class-attribute instance-attribute

n_bandpass_freqs = Int(100, help='The number of sampling frequencies used when convolving the bandpass with atmosphere absorption and loading') class-attribute instance-attribute

nelem_sim_max = Int(10000, help='Controls the size of the simulation slices') class-attribute instance-attribute

overwrite_cache = Bool(False, help='If True, redo and overwrite any cached atmospheric realizations.') class-attribute instance-attribute

polarization_fraction = Float(0, help='Polarization fraction (only Q polarization).') class-attribute instance-attribute

realization = Int(0, help='If simulating multiple realizations, the realization index') class-attribute instance-attribute

sample_rate = Quantity(None, allow_none=True, help='Rate at which to sample atmospheric TOD before interpolation. Default is no interpolation.') class-attribute instance-attribute

shared_flag_mask = Int(defaults.shared_mask_invalid, help='Bit mask value for optional flagging') class-attribute instance-attribute

shared_flags = Unicode(defaults.shared_flags, allow_none=True, help='Observation shared key for telescope flags to use') class-attribute instance-attribute

times = Unicode(defaults.times, help='Observation shared key for timestamps') class-attribute instance-attribute

turnaround_interval = Unicode('turnaround', allow_none=True, help='Interval name for turnarounds') class-attribute instance-attribute

view = Unicode(None, allow_none=True, help='Use this view of the data in all observations') class-attribute instance-attribute

wind_dist = Quantity(3000.0 * u.meter, help='Maximum wind drift before discarding the volume and creating a new one') class-attribute instance-attribute

xstep = Quantity(100.0 * u.meter, help='Size of volume elements in X direction') class-attribute instance-attribute

ystep = Quantity(100.0 * u.meter, help='Size of volume elements in Y direction') class-attribute instance-attribute

z0_center = Quantity(2000.0 * u.meter, help='Central value of the water vapor distribution') class-attribute instance-attribute

z0_sigma = Quantity(0.0 * u.meter, help='Sigma of the water vapor distribution') class-attribute instance-attribute

zatm = Quantity(40000.0 * u.meter, help='Atmosphere extent for temperature profile') class-attribute instance-attribute

zmax = Quantity(2000.0 * u.meter, help='Atmosphere extent for water vapor integration') class-attribute instance-attribute

zstep = Quantity(100.0 * u.meter, help='Size of volume elements in Z direction') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/sim_tod_atm.py
273
274
275
276
277
278
279
280
281
282
283
284
285
286
def __init__(self, **kwargs):
    super().__init__(**kwargs)
    if not available_utils:
        log = Logger.get()
        msg = "TOAST was compiled without the libaatm library, which is "
        msg += "required for observations of simulated atmosphere."
        log.error(msg)
        raise RuntimeError(msg)
    if not available_atm:
        log = Logger.get()
        msg = "TOAST was compiled without the SuiteSparse package, which is "
        msg += "required for observations of simulated atmosphere."
        log.error(msg)
        raise RuntimeError(msg)

_check_det_flag_mask(proposal)

Source code in toast/ops/sim_tod_atm.py
217
218
219
220
221
222
@traitlets.validate("det_flag_mask")
def _check_det_flag_mask(self, proposal):
    check = proposal["value"]
    if check < 0:
        raise traitlets.TraitError("Flag mask should be a positive integer")
    return check

_check_det_mask(proposal)

Source code in toast/ops/sim_tod_atm.py
210
211
212
213
214
215
@traitlets.validate("det_mask")
def _check_det_mask(self, proposal):
    check = proposal["value"]
    if check < 0:
        raise traitlets.TraitError("Det mask should be a positive integer")
    return check

_check_detector_pointing(proposal)

Source code in toast/ops/sim_tod_atm.py
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
@traitlets.validate("detector_pointing")
def _check_detector_pointing(self, proposal):
    detpointing = proposal["value"]
    if detpointing is not None:
        if not isinstance(detpointing, Operator):
            raise traitlets.TraitError(
                "detector_pointing should be an Operator instance"
            )
        # Check that this operator has the traits we expect
        for trt in [
            "view",
            "boresight",
            "shared_flags",
            "shared_flag_mask",
            "quats",
            "coord_in",
            "coord_out",
        ]:
            if not detpointing.has_trait(trt):
                msg = f"detector_pointing operator should have a '{trt}' trait"
                raise traitlets.TraitError(msg)
    return detpointing

_check_detector_weights(proposal)

Source code in toast/ops/sim_tod_atm.py
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
@traitlets.validate("detector_weights")
def _check_detector_weights(self, proposal):
    detweights = proposal["value"]
    if detweights is not None:
        if not isinstance(detweights, Operator):
            raise traitlets.TraitError(
                "detector_weights should be an Operator instance"
            )
        # Check that this operator has the traits we expect
        for trt in [
            "view",
            "weights",
            "mode",
        ]:
            if not detweights.has_trait(trt):
                msg = f"detector_weights operator should have a '{trt}' trait"
                raise traitlets.TraitError(msg)
    return detweights

_check_shared_flag_mask(proposal)

Source code in toast/ops/sim_tod_atm.py
224
225
226
227
228
229
@traitlets.validate("shared_flag_mask")
def _check_shared_flag_mask(self, proposal):
    check = proposal["value"]
    if check < 0:
        raise traitlets.TraitError("Flag mask should be a positive integer")
    return check

_common_absorption_and_loading(obs, dets, absorption_key, loading_key, comm)

Compute the (common) absorption and loading prior to bandpass convolution.

Source code in toast/ops/sim_tod_atm.py
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
@function_timer
def _common_absorption_and_loading(
    self, obs, dets, absorption_key, loading_key, comm
):
    """Compute the (common) absorption and loading prior to bandpass convolution.

    Detectors are grouped by their bandpass frequency range.  For each
    group, the atmospheric absorption coefficient and (optionally) the
    loading are evaluated on a frequency grid that is split across the
    processes in `comm` and then gathered.  Results are cached in the
    observation under `absorption_key` / `loading_key`, keyed on the
    frequency range string.

    Args:
        obs:  The observation to process.
        dets (list):  The detectors to consider.
        absorption_key (str):  Observation key for absorption, or None
            to skip the absorption calculation.
        loading_key (str):  Observation key for loading, or None to skip
            the loading calculation.
        comm:  The MPI communicator to use, or None for serial.

    Returns:
        None

    """
    # (A duplicate of this check further down was removed — it was
    # unreachable dead code.)
    if absorption_key is None and loading_key is None:
        # Nothing to do
        return

    if obs.telescope.focalplane.bandpass is None:
        raise RuntimeError("Focalplane does not define bandpass")
    altitude = obs.telescope.site.earthloc.height
    weather = obs.telescope.site.weather
    bandpass = obs.telescope.focalplane.bandpass

    # Determine whether any of our detectors are missing cached results.
    generate_absorption = False
    if absorption_key is not None:
        if absorption_key in obs:
            for det in dets:
                if det not in obs[absorption_key]:
                    generate_absorption = True
                    break
        else:
            generate_absorption = True

    generate_loading = False
    if loading_key is not None:
        if loading_key in obs:
            for det in dets:
                if det not in obs[loading_key]:
                    generate_loading = True
                    break
        else:
            generate_loading = True

    if (not generate_loading) and (not generate_absorption):
        # Nothing to do for these detectors
        return

    if generate_loading:
        if loading_key in obs:
            # Delete stale data
            del obs[loading_key]
        obs[loading_key] = dict()

    if generate_absorption:
        if absorption_key in obs:
            # Delete stale data
            del obs[absorption_key]
        obs[absorption_key] = dict()

    # The focalplane likely has groups of detectors whose bandpass spans
    # the same frequency range.  First we build this grouping.
    freq_groups = dict()
    for det in dets:
        dfmin, dfmax = bandpass.get_range(det=det)
        fkey = f"{dfmin} {dfmax}"
        if fkey not in freq_groups:
            freq_groups[fkey] = list()
        freq_groups[fkey].append(det)

    # Work on each frequency group of detectors.  Collectively use the
    # processes in the group to do the calculation.
    for fkey, fdets in freq_groups.items():
        freq_min, freq_max = bandpass.get_range(det=fdets[0])
        n_freq = self.n_bandpass_freqs
        freqs = np.linspace(freq_min, freq_max, n_freq)
        if comm is None:
            ntask = 1
            my_rank = 0
        else:
            ntask = comm.size
            my_rank = comm.rank
        # Uniformly partition the frequency grid across processes; the
        # min() clamps keep trailing ranks with an empty slice when
        # n_freq does not divide evenly.
        n_freq_task = int(np.ceil(n_freq / ntask))
        my_start = min(my_rank * n_freq_task, n_freq)
        my_stop = min(my_start + n_freq_task, n_freq)
        my_n_freq = my_stop - my_start

        absorption = list()
        loading = list()

        if my_n_freq > 0:
            if generate_absorption:
                absorption = atm_absorption_coefficient_vec(
                    altitude.to_value(u.meter),
                    weather.air_temperature.to_value(u.Kelvin),
                    weather.surface_pressure.to_value(u.Pa),
                    weather.pwv.to_value(u.mm),
                    freqs[my_start].to_value(u.GHz),
                    freqs[my_stop - 1].to_value(u.GHz),
                    my_n_freq,
                )
            if generate_loading:
                loading = atm_atmospheric_loading_vec(
                    altitude.to_value(u.meter),
                    weather.air_temperature.to_value(u.Kelvin),
                    weather.surface_pressure.to_value(u.Pa),
                    weather.pwv.to_value(u.mm),
                    freqs[my_start].to_value(u.GHz),
                    freqs[my_stop - 1].to_value(u.GHz),
                    my_n_freq,
                )

        if comm is not None:
            # Concatenate the per-process slices into the full grid.
            if generate_absorption:
                absorption = np.hstack(comm.allgather(absorption))
            if generate_loading:
                loading = np.hstack(comm.allgather(loading))
        if generate_absorption:
            obs[absorption_key][fkey] = absorption
        if generate_loading:
            obs[loading_key][fkey] = loading

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/sim_tod_atm.py
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Simulate atmosphere slabs and observe them into detector data.

    Atmosphere realizations are generated (or loaded from cache) once
    per observing session via GenerateAtmosphere, then each observation
    is processed with a small per-detector pipeline (detector pointing,
    optional Stokes weights, ObserveAtmosphere) so that detector
    pointing is never stored persistently.  Temporary intervals,
    absorption/loading products, and the atmosphere slabs themselves
    are deleted before returning.
    """
    env = Environment.get()
    log = Logger.get()

    if self.detector_pointing is None:
        raise RuntimeError("The detector_pointing trait must be set")

    # Since each process group has the same set of observations, we use the group
    # communicator for collectively simulating the atmosphere slab for each
    # observation.
    comm = data.comm.comm_group
    group = data.comm.group
    rank = data.comm.group_rank
    # NOTE(review): env, comm_node and comm_node_rank are assigned but
    # not referenced again in this method.
    comm_node = data.comm.comm_group_node
    comm_node_rank = data.comm.comm_group_node_rank

    view = self.view
    if view is None:
        # Use the same data view as detector pointing
        view = self.detector_pointing.view

    # Name of the intervals for ranges valid for a given wind chunk
    wind_intervals = "wind"

    # A view that combines user input and wind breaks
    if self.view is None and self.detector_pointing.view is None:
        temporary_view = wind_intervals
    else:
        temporary_view = "temporary_view"

    # The atmosphere sims are created and stored for each observing session.
    # This data key contains a dictionary of sims, keyed on session name.
    atm_sim_key = "atm_sim"

    # Generate (or load) the atmosphere realizations for all sessions
    gen_atm = GenerateAtmosphere(
        times=self.times,
        boresight=self.detector_pointing.boresight,
        wind_intervals=wind_intervals,
        shared_flags=self.shared_flags,
        shared_flag_mask=self.shared_flag_mask,
        output=atm_sim_key,
        turnaround_interval=self.turnaround_interval,
        realization=self.realization,
        component=self.component,
        lmin_center=self.lmin_center,
        lmin_sigma=self.lmin_sigma,
        lmax_center=self.lmax_center,
        lmax_sigma=self.lmax_sigma,
        gain=self.gain,
        zatm=self.zatm,
        zmax=self.zmax,
        xstep=self.xstep,
        ystep=self.ystep,
        zstep=self.zstep,
        z0_center=self.z0_center,
        z0_sigma=self.z0_sigma,
        wind_dist=self.wind_dist,
        fade_time=self.fade_time,
        sample_rate=self.sample_rate,
        nelem_sim_max=self.nelem_sim_max,
        cache_dir=self.cache_dir,
        overwrite_cache=self.overwrite_cache,
        cache_only=self.cache_only,
        debug_spectrum=self.debug_spectrum,
        debug_snapshots=self.debug_snapshots,
        debug_plots=self.debug_plots,
        field_of_view=self.field_of_view,
        corr_lim=self.corr_lim,
    )
    gen_atm.apply(data)

    if self.cache_only:
        # In this case, the simulated slabs were written to disk but never stored
        # in the output data key.
        return

    # Observation key for storing absorption and loading
    absorption_key = f"{self.name}_absorption"
    if self.add_loading:
        loading_key = f"{self.name}_loading"
    else:
        loading_key = None

    # Set up the observing operator
    if self.shared_flags is None:
        # Cannot observe samples that have no pointing
        shared_flags = self.detector_pointing.shared_flags
        shared_flag_mask = self.detector_pointing.shared_flag_mask
    else:
        # Trust that the user has provided a flag that excludes samples
        # without pointing
        shared_flags = self.shared_flags
        shared_flag_mask = self.shared_flag_mask

    observe_atm = ObserveAtmosphere(
        times=self.times,
        det_data=self.det_data,
        quats_azel=self.detector_pointing.quats,
        view=temporary_view,
        shared_flags=shared_flags,
        shared_flag_mask=shared_flag_mask,
        det_mask=self.det_mask,
        det_flags=self.det_flags,
        det_flag_mask=self.det_flag_mask,
        det_data_units=self.det_data_units,
        wind_view=wind_intervals,
        fade_time=self.fade_time,
        sim=atm_sim_key,
        absorption=absorption_key,
        loading=loading_key,
        n_bandpass_freqs=self.n_bandpass_freqs,
        gain=self.gain,
        polarization_fraction=self.polarization_fraction,
        sample_rate=self.sample_rate,
        debug_tod=self.debug_tod,
    )
    if self.detector_weights is not None:
        # Propagate the Stokes weights configuration into the observer.
        observe_atm.weights_mode = self.detector_weights.mode
        observe_atm.weights = self.detector_weights.weights

    for iobs, ob in enumerate(data.obs):
        if ob.name is None:
            msg = "Atmosphere simulation requires each observation to have a name"
            raise RuntimeError(msg)

        # Get the detectors we are using for this observation
        dets = ob.select_local_detectors(detectors, flagmask=self.det_mask)
        if len(dets) == 0:
            # Nothing to do for this observation
            continue

        tmr = Timer()
        tmr.start()

        # Prefix for logging
        log_prefix = f"{group} : {ob.name} : "

        # Make sure detector data output exists
        exists = ob.detdata.ensure(
            self.det_data,
            detectors=dets,
            create_units=self.det_data_units,
        )

        # Check that our view is fully covered by detector pointing.  If the
        # detector_pointing view is None, then it has all samples.  If our own
        # view was None, then it would have been set to the detector_pointing
        # view above.
        if (view is not None) and (self.detector_pointing.view is not None):
            if ob.intervals[view] != ob.intervals[self.detector_pointing.view]:
                # We need to check intersection
                intervals = ob.intervals[self.view]
                detector_intervals = ob.intervals[self.detector_pointing.view]
                intersection = detector_intervals & intervals
                if intersection != intervals:
                    msg = f"view {self.view} is not fully covered by valid "
                    msg += "detector pointing"
                    raise RuntimeError(msg)

        # Compute the absorption and loading for this observation
        self._common_absorption_and_loading(
            ob, dets, absorption_key, loading_key, comm
        )

        # Create temporary intervals by combining views
        if temporary_view != wind_intervals:
            ob.intervals[temporary_view] = (
                ob.intervals[view] & ob.intervals[wind_intervals]
            )

        # Observation pipeline.  We do not want to store persistent detector
        # pointing, so we build a small pipeline that runs one detector at a
        # time on only the current observation.
        pipe_data = data.select(obs_index=iobs)

        operators = [self.detector_pointing]
        if self.detector_weights is not None:
            operators.append(self.detector_weights)
        operators.append(observe_atm)

        observe_pipe = Pipeline(operators=operators, detector_sets=["SINGLE"])
        observe_pipe.apply(pipe_data)

        # Delete the absorption and loading for this observation
        # NOTE(review): absorption_key is always a non-None string here;
        # this assumes _common_absorption_and_loading created the key —
        # confirm for the case where it returned early.
        if absorption_key is not None:
            del ob[absorption_key]
        if loading_key is not None:
            del ob[loading_key]

        if comm is not None:
            comm.Barrier()
        if rank == 0:
            tmr.stop()
            log.debug(
                f"{log_prefix}Simulate and observe atmosphere:  "
                f"{tmr.seconds()} seconds"
            )

        if temporary_view != wind_intervals:
            del ob.intervals[temporary_view]
        del ob.intervals[wind_intervals]

    # Delete the atmosphere slabs for all sessions
    for sname in list(data[atm_sim_key].keys()):
        for wind_slabs in data[atm_sim_key][sname]:
            for slab in wind_slabs:
                slab.close()
        del data[atm_sim_key][sname]
    del data[atm_sim_key]

_finalize(data, **kwargs)

Source code in toast/ops/sim_tod_atm.py
619
620
def _finalize(self, data, **kwargs):
    """No finalization needed; temporary products are cleaned up in _exec()."""
    return

_provides()

Source code in toast/ops/sim_tod_atm.py
635
636
637
638
639
640
641
642
643
def _provides(self):
    prov = {
        "meta": list(),
        "shared": list(),
        "detdata": [
            self.det_data,
        ],
    }
    return prov

_requires()

Source code in toast/ops/sim_tod_atm.py
622
623
624
625
626
627
628
629
630
631
632
633
def _requires(self):
    req = {
        "meta": list(),
        "shared": [
            self.boresight,
        ],
        "detdata": [self.detdata],
        "intervals": list(),
    }
    if self.view is not None:
        req["intervals"].append(self.view)
    return req

toast.ops.SimScanSynchronousSignal

Bases: Operator

Operator which generates scan-synchronous signal timestreams.

Source code in toast/ops/sss.py
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
@trait_docs
class SimScanSynchronousSignal(Operator):
    """Operator which generates scan-synchronous signal timestreams.

    A horizontal (Az/El frame) Healpix map of the ground signal is either
    loaded from `path` or synthesized as a smoothed Gaussian realization,
    cached in each observation's shared data, and then sampled along the
    detector pointing with bilinear interpolation and accumulated into
    the detector data.
    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    realization = Int(0, help="The simulation realization index")

    component = Int(663056, help="The simulation component index")

    det_data = Unicode(
        defaults.det_data,
        help="Observation detdata key for accumulating simulated timestreams",
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    detector_pointing = Instance(
        klass=Operator,
        allow_none=True,
        help="Operator that translates boresight Az/El pointing into detector frame",
    )

    pol = Bool(False, help="Ground map is polarized")

    nside = Int(128, help="Ground map healpix resolution")

    fwhm = Quantity(10 * u.arcmin, help="Ground map smoothing scale")

    lmax = Int(256, help="Ground map expansion order")

    scale = Quantity(1 * u.mK, help="RMS of the ground signal fluctuations at el=45deg")

    power = Float(
        -1,
        help="Exponential for suppressing ground pickup at higher observing elevation",
    )

    path = Unicode(
        None,
        allow_none=True,
        help="Path to a horizontal Healpix map to sample for the SSS *instead* "
        "of synthesizing Gaussian maps",
    )

    # Observation shared-data key used to cache the simulated ground map.
    sss_map = "sss_map"

    @traitlets.validate("realization")
    def _check_realization(self, proposal):
        # Reject only negatives; 0 (the default) is accepted.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("realization index must be positive")
        return check

    @traitlets.validate("component")
    def _check_component(self, proposal):
        # Reject only negatives; 0 is accepted.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("component index must be positive")
        return check

    def __init__(self, **kwargs):
        # No extra state beyond the base Operator initialization.
        super().__init__(**kwargs)

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Simulate and observe the scan-synchronous signal per observation."""
        log = Logger.get()

        group = data.comm.group
        comm = data.comm.comm_group

        for obs in data.obs:
            dets = obs.select_local_detectors(
                detectors, flagmask=defaults.det_mask_invalid
            )
            log_prefix = f"{group} : {obs.name} : "

            # Create the output detector data if it does not yet exist.
            exists = obs.detdata.ensure(
                self.det_data, detectors=dets, create_units=self.det_data_units
            )

            # The detector data units
            # NOTE(review): stored on self so _simulate_sss can scale the
            # map into the same units; stateful between the two calls.
            self.units = obs.detdata[self.det_data].units

            site = obs.telescope.site
            weather = site.weather

            key1, key2, counter1, counter2 = self._get_rng_keys(obs)

            log.debug_rank(f"{log_prefix}Simulating SSS", comm=comm)

            self._simulate_sss(obs, key1, key2, counter1, counter2, weather, comm)

            log.debug_rank(f"{log_prefix}Observing SSS", comm=comm)

            self._observe_sss(data, obs, dets)

        return

    @function_timer
    def _get_rng_keys(self, obs):
        """
        The random number generator accepts a key and a counter,
        each made of two 64bit integers.
        Following tod_math.py we set
        key1 = realization * 2^32 + telescope * 2^16 + component
        key2 = site * 2^16 + sindx
        counter1 = hierarchical cone counter
        counter2 = sample in stream
        """
        telescope = obs.telescope.uid
        site = obs.telescope.site.uid
        sindx = obs.session.uid
        key1 = self.realization * 2**32 + telescope * 2**16 + self.component
        key2 = site * 2**16 + sindx
        counter1 = 0
        counter2 = 0
        return key1, key2, counter1, counter2

    @function_timer
    def _simulate_sss(self, obs, key1, key2, counter1, counter2, weather, comm):
        """
        Create a map of the ground signal to observe with all detectors
        """
        # We may already have cached the SSS map
        if self.sss_map in obs.shared and "sss_realization" in obs:
            if obs["sss_realization"] == self.realization:
                return

        # Surface temperature is made available but not used yet
        # to scale the SSS
        dtype = np.float32
        if comm is None or comm.rank == 0:
            # Only the root process loads or simulates the map
            temperature = weather.surface_temperature
            if self.path:
                # Load a user-provided horizontal map instead of
                # synthesizing one.  No scaling is applied in this branch.
                if self.pol:
                    sss_map = hp.read_map(self.path, [0, 1, 2], dtype=dtype)
                else:
                    sss_map = [hp.read_map(self.path, dtype=dtype)]
            else:
                npix = 12 * self.nside**2
                sss_map = rng.random(
                    npix,
                    key=(key1, key2),
                    counter=(counter1, counter2),
                    sampler="gaussian",
                )
                sss_map = np.array(sss_map, dtype=dtype)
                # Smooth to the requested scale and normalize to unit RMS.
                sss_map = hp.smoothing(
                    sss_map, fwhm=self.fwhm.to_value(u.radian), lmax=self.lmax
                ).astype(dtype)
                sss_map /= np.std(sss_map)
                lon, lat = hp.pix2ang(
                    self.nside, np.arange(npix, dtype=np.int64), lonlat=True
                )
                # Elevation-dependent amplitude: `scale` at el=45deg,
                # suppressed at higher elevations via `power`.
                scale = self.scale * (np.abs(lat) / 90 + 0.5) ** self.power
                sss_map *= scale.to_value(self.units)
                if self.pol:
                    # Mock up a 10% Q-polarized ground signal using the
                    # simulated intensity
                    sss_map = [sss_map, sss_map * 0.1, sss_map * 0]
                else:
                    sss_map = [sss_map]
            sss_map = np.vstack(sss_map)
            nmap, npix = sss_map.shape
        else:
            npix = None
            nmap = None
            sss_map = None

        # Broadcast the map dimensions and cache the map in shared memory.
        if comm is not None:
            npix = comm.bcast(npix)
            nmap = comm.bcast(nmap)
        self.nside = hp.npix2nside(npix)
        obs.shared.create_group(self.sss_map, shape=(nmap, npix), dtype=dtype)
        obs.shared[self.sss_map].set(sss_map, fromrank=0)
        obs["sss_realization"] = self.realization

        return

    @function_timer
    def _observe_sss(self, data, obs, dets):
        """
        Use healpy bilinear interpolation to observe the ground signal map
        """

        sss_maps = obs.shared[self.sss_map].data

        for det in dets:
            signal = obs.detdata[self.det_data][det]

            try:
                # Use cached detector quaternions
                quats = obs.detdata[self.detector_pointing.quats][det]
            except KeyError:
                # Compute the detector quaternions
                obs_data = data.select(obs_uid=obs.uid)
                self.detector_pointing.apply(obs_data, detectors=[det])
                quats = obs.detdata[self.detector_pointing.quats][det]

            # Convert Az/El quaternion of the detector into angles
            theta, phi, psi = qa.to_iso_angles(quats)
            # Stokes weights: I always, plus Q/U from the detector
            # polarization angle when the map is polarized.
            stokes_weights = [np.ones(signal.size)]
            if self.pol:
                stokes_weights.append(np.cos(2 * psi))
                stokes_weights.append(np.sin(2 * psi))

            # hp.get_interp_val(sss_map, theta, phi)
            # Accumulate the 4-neighbor bilinear interpolation manually so
            # the same pixel weights apply to every Stokes component.
            pixels, weights = hp.get_interp_weights(self.nside, theta, phi)
            for p, w in zip(pixels, weights):
                for sss_map, wstokes in zip(sss_maps, stokes_weights):
                    signal += sss_map[p] * w * wstokes

        return

    def finalize(self, data, **kwargs):
        """Delete the cached SSS map from every observation."""
        # NOTE(review): assumes _exec() created the shared map for each
        # observation — confirm behavior if finalize runs without _exec.
        for obs in data.obs:
            del obs.shared[self.sss_map]
        return

    def _requires(self):
        """Return the data products required by this operator."""
        req = {
            "meta": list(),
            "shared": list(),
            "detdata": [self.det_data],
            "intervals": list(),
        }
        return req

    def _provides(self):
        """Return the data products created by this operator."""
        prov = {
            "meta": list(),
            "shared": list(),
            "detdata": [self.det_data],
        }
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

component = Int(663056, help='The simulation component index') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, help='Observation detdata key for accumulating simulated timestreams') class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

detector_pointing = Instance(klass=Operator, allow_none=True, help='Operator that translates boresight Az/El pointing into detector frame') class-attribute instance-attribute

fwhm = Quantity(10 * u.arcmin, help='Ground map smoothing scale') class-attribute instance-attribute

lmax = Int(256, help='Ground map expansion order') class-attribute instance-attribute

nside = Int(128, help='Ground map healpix resolution') class-attribute instance-attribute

path = Unicode(None, allow_none=True, help='Path to a horizontal Healpix map to sample for the SSS *instead* of synthesizing Gaussian maps') class-attribute instance-attribute

pol = Bool(False, help='Ground map is polarized') class-attribute instance-attribute

power = Float(-1, help='Exponential for suppressing ground pickup at higher observing elevation') class-attribute instance-attribute

realization = Int(0, help='The simulation realization index') class-attribute instance-attribute

scale = Quantity(1 * u.mK, help='RMS of the ground signal fluctuations at el=45deg') class-attribute instance-attribute

sss_map = 'sss_map' class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/sss.py
91
92
def __init__(self, **kwargs):
    super().__init__(**kwargs)

_check_component(proposal)

Source code in toast/ops/sss.py
84
85
86
87
88
89
@traitlets.validate("component")
def _check_component(self, proposal):
    check = proposal["value"]
    if check < 0:
        raise traitlets.TraitError("component index must be positive")
    return check

_check_realization(proposal)

Source code in toast/ops/sss.py
77
78
79
80
81
82
@traitlets.validate("realization")
def _check_realization(self, proposal):
    check = proposal["value"]
    if check < 0:
        raise traitlets.TraitError("realization index must be positive")
    return check

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/sss.py
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    log = Logger.get()

    group = data.comm.group
    comm = data.comm.comm_group

    for obs in data.obs:
        dets = obs.select_local_detectors(
            detectors, flagmask=defaults.det_mask_invalid
        )
        log_prefix = f"{group} : {obs.name} : "

        exists = obs.detdata.ensure(
            self.det_data, detectors=dets, create_units=self.det_data_units
        )

        # The detector data units
        self.units = obs.detdata[self.det_data].units

        site = obs.telescope.site
        weather = site.weather

        key1, key2, counter1, counter2 = self._get_rng_keys(obs)

        log.debug_rank(f"{log_prefix}Simulating SSS", comm=comm)

        self._simulate_sss(obs, key1, key2, counter1, counter2, weather, comm)

        log.debug_rank(f"{log_prefix}Observing SSS", comm=comm)

        self._observe_sss(data, obs, dets)

    return

_get_rng_keys(obs)

The random number generator accepts a key and a counter, each made of two 64bit integers. Following tod_math.py we set key1 = realization * 2^32 + telescope * 2^16 + component, key2 = site * 2^16 + sindx, counter1 = hierarchical cone counter, counter2 = sample in stream.

Source code in toast/ops/sss.py
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
@function_timer
def _get_rng_keys(self, obs):
    """
    The random number generator accepts a key and a counter,
    each made of two 64bit integers.
    Following tod_math.py we set
    key1 = realization * 2^32 + telescope * 2^16 + component
    key2 = sindx * 2^32
    counter1 = hierarchical cone counter
    counter2 = sample in stream
    """
    telescope = obs.telescope.uid
    site = obs.telescope.site.uid
    sindx = obs.session.uid
    key1 = self.realization * 2**32 + telescope * 2**16 + self.component
    key2 = site * 2**16 + sindx
    counter1 = 0
    counter2 = 0
    return key1, key2, counter1, counter2

_observe_sss(data, obs, dets)

Use healpy bilinear interpolation to observe the ground signal map

Source code in toast/ops/sss.py
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
@function_timer
def _observe_sss(self, data, obs, dets):
    """
    Use healpy bilinear interpolation to observe the ground signal map
    """

    sss_maps = obs.shared[self.sss_map].data

    for det in dets:
        signal = obs.detdata[self.det_data][det]

        try:
            # Use cached detector quaternions
            quats = obs.detdata[self.detector_pointing.quats][det]
        except KeyError:
            # Compute the detector quaternions
            obs_data = data.select(obs_uid=obs.uid)
            self.detector_pointing.apply(obs_data, detectors=[det])
            quats = obs.detdata[self.detector_pointing.quats][det]

        # Convert Az/El quaternion of the detector into angles
        theta, phi, psi = qa.to_iso_angles(quats)
        stokes_weights = [np.ones(signal.size)]
        if self.pol:
            stokes_weights.append(np.cos(2 * psi))
            stokes_weights.append(np.sin(2 * psi))

        # hp.get_interp_val(sss_map, theta, phi)
        pixels, weights = hp.get_interp_weights(self.nside, theta, phi)
        for p, w in zip(pixels, weights):
            for sss_map, wstokes in zip(sss_maps, stokes_weights):
                signal += sss_map[p] * w * wstokes

    return

_provides()

Source code in toast/ops/sss.py
260
261
262
263
264
265
266
def _provides(self):
    prov = {
        "meta": list(),
        "shared": list(),
        "detdata": [self.det_data],
    }
    return prov

_requires()

Source code in toast/ops/sss.py
251
252
253
254
255
256
257
258
def _requires(self):
    req = {
        "meta": list(),
        "shared": list(),
        "detdata": [self.det_data],
        "intervals": list(),
    }
    return req

_simulate_sss(obs, key1, key2, counter1, counter2, weather, comm)

Create a map of the ground signal to observe with all detectors

Source code in toast/ops/sss.py
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
@function_timer
def _simulate_sss(self, obs, key1, key2, counter1, counter2, weather, comm):
    """
    Create a map of the ground signal to observe with all detectors
    """
    # We may already have cached the SSS map
    if self.sss_map in obs.shared and "sss_realization" in obs:
        if obs["sss_realization"] == self.realization:
            return

    # Surface temperature is made available but not used yet
    # to scale the SSS
    dtype = np.float32
    if comm is None or comm.rank == 0:
        # Only the root process loads or simulates the map
        temperature = weather.surface_temperature
        if self.path:
            if self.pol:
                sss_map = hp.read_map(self.path, [0, 1, 2], dtype=dtype)
            else:
                sss_map = [hp.read_map(self.path, dtype=dtype)]
        else:
            npix = 12 * self.nside**2
            sss_map = rng.random(
                npix,
                key=(key1, key2),
                counter=(counter1, counter2),
                sampler="gaussian",
            )
            sss_map = np.array(sss_map, dtype=dtype)
            sss_map = hp.smoothing(
                sss_map, fwhm=self.fwhm.to_value(u.radian), lmax=self.lmax
            ).astype(dtype)
            sss_map /= np.std(sss_map)
            lon, lat = hp.pix2ang(
                self.nside, np.arange(npix, dtype=np.int64), lonlat=True
            )
            scale = self.scale * (np.abs(lat) / 90 + 0.5) ** self.power
            sss_map *= scale.to_value(self.units)
            if self.pol:
                # Mock up a 10% Q-polarized ground signal using the
                # simulated intensity
                sss_map = [sss_map, sss_map * 0.1, sss_map * 0]
            else:
                sss_map = [sss_map]
        sss_map = np.vstack(sss_map)
        nmap, npix = sss_map.shape
    else:
        npix = None
        nmap = None
        sss_map = None

    if comm is not None:
        npix = comm.bcast(npix)
        nmap = comm.bcast(nmap)
    self.nside = hp.npix2nside(npix)
    obs.shared.create_group(self.sss_map, shape=(nmap, npix), dtype=dtype)
    obs.shared[self.sss_map].set(sss_map, fromrank=0)
    obs["sss_realization"] = self.realization

    return

finalize(data, **kwargs)

Source code in toast/ops/sss.py
246
247
248
249
def finalize(self, data, **kwargs):
    """Remove the simulated SSS map from the shared data of every observation."""
    for observation in data.obs:
        del observation.shared[self.sss_map]

Instrument Signals

These operators simulate instrumental effects from sources of power inside the telescope and receiver.

toast.ops.DefaultNoiseModel

Bases: Operator

Create a default noise model from focalplane parameters.

A noise model is used by other operations such as simulating noise timestreams and also map making. This operator uses the detector properties from the focalplane in each observation to create a simple AnalyticNoise model.

Source code in toast/ops/noise_model.py
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
@trait_docs
class DefaultNoiseModel(Operator):
    """Create a default noise model from focalplane parameters.

    A noise model is used by other operations such as simulating noise timestreams
    and also map making.  This operator uses the detector properties from the
    focalplane in each observation to create a simple AnalyticNoise model.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    noise_model = Unicode(
        "noise_model", help="The observation key for storing the noise model"
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        log = Logger.get()

        # Focalplane columns that must all be present to build the model.
        required = {"psd_fmin", "psd_fknee", "psd_alpha", "psd_net"}

        for ob in data.obs:
            fp_data = ob.telescope.focalplane.detector_data
            if not required.issubset(fp_data.colnames):
                # Without the PSD parameters we cannot build an analytic
                # model; record None so downstream code sees the key.
                msg = f"Observation {ob.name} does not have a focalplane with "
                msg += "noise parameters.  Skipping."
                log.warning(msg)
                ob[self.noise_model] = None
                continue

            local_dets = set(ob.local_detectors)
            # The sample rate is a per-focalplane property; hoist it out
            # of the per-row loop.
            sample_rate = ob.telescope.focalplane.sample_rate

            # Per-detector noise parameters, keyed by detector name.
            dets = list()
            fmin = dict()
            fknee = dict()
            alpha = dict()
            NET = dict()
            rates = dict()
            indices = dict()

            for row in fp_data:
                name = row["name"]
                if name in local_dets:
                    dets.append(name)
                    rates[name] = sample_rate
                    fmin[name] = row["psd_fmin"]
                    fknee[name] = row["psd_fknee"]
                    alpha[name] = row["psd_alpha"]
                    NET[name] = row["psd_net"]
                    indices[name] = row["uid"]

            ob[self.noise_model] = AnalyticNoise(
                rate=rates,
                fmin=fmin,
                detectors=dets,
                fknee=fknee,
                alpha=alpha,
                NET=NET,
                indices=indices,
            )

    def _finalize(self, data, **kwargs):
        return

    def _requires(self):
        return dict()

    def _provides(self):
        return {"meta": [self.noise_model]}

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

noise_model = Unicode('noise_model', help='The observation key for storing the noise model') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/noise_model.py
40
41
def __init__(self, **kwargs):
    """Construct the operator, forwarding trait settings to the base class."""
    super().__init__(**kwargs)

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/noise_model.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Build an AnalyticNoise model in each observation from focalplane data."""
    log = Logger.get()

    # Focalplane columns that must all be present to build the model.
    required = {"psd_fmin", "psd_fknee", "psd_alpha", "psd_net"}

    for ob in data.obs:
        fp_data = ob.telescope.focalplane.detector_data
        if not required.issubset(fp_data.colnames):
            # Without the PSD parameters we cannot build an analytic
            # model; record None so downstream code sees the key.
            msg = f"Observation {ob.name} does not have a focalplane with "
            msg += "noise parameters.  Skipping."
            log.warning(msg)
            ob[self.noise_model] = None
            continue

        local_dets = set(ob.local_detectors)
        # The sample rate is a per-focalplane property; hoist it out of
        # the per-row loop.
        sample_rate = ob.telescope.focalplane.sample_rate

        # Per-detector noise parameters, keyed by detector name.
        dets = list()
        fmin = dict()
        fknee = dict()
        alpha = dict()
        NET = dict()
        rates = dict()
        indices = dict()

        for row in fp_data:
            name = row["name"]
            if name in local_dets:
                dets.append(name)
                rates[name] = sample_rate
                fmin[name] = row["psd_fmin"]
                fknee[name] = row["psd_fknee"]
                alpha[name] = row["psd_alpha"]
                NET[name] = row["psd_net"]
                indices[name] = row["uid"]

        ob[self.noise_model] = AnalyticNoise(
            rate=rates,
            fmin=fmin,
            detectors=dets,
            fknee=fknee,
            alpha=alpha,
            NET=NET,
            indices=indices,
        )

_finalize(data, **kwargs)

Source code in toast/ops/noise_model.py
96
97
def _finalize(self, data, **kwargs):
    return

_provides()

Source code in toast/ops/noise_model.py
102
103
104
def _provides(self):
    prov = {"meta": [self.noise_model]}
    return prov

_requires()

Source code in toast/ops/noise_model.py
 99
100
def _requires(self):
    return dict()

toast.ops.ElevationNoise

Bases: Operator

Modify detector noise model based on elevation. Optionally include PWV modulation.

This adjusts the detector PSDs in a noise model based on the median elevation of each detector in each observation.

The PSD value is scaled by:

.. math:: PSD_{new} = PSD_{old} * (a / sin(el) + c)^2

NOTE: since this operator generates a new noise model for all detectors, you should specify all detectors you intend to use downstream when calling exec().

If the view trait is not specified, then this operator will use the same data view as the detector pointing operator when computing the pointing matrix pixels and weights.

If the output model is not specified, then the input is modified in place.

Source code in toast/ops/elevation_noise.py
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
@trait_docs
class ElevationNoise(Operator):
    """Modify detector noise model based on elevation.
    Optionally include PWV modulation.

    This adjusts the detector PSDs in a noise model based on the median elevation of
    each detector in each observation.

    The PSD value is scaled by:

    .. math::
        PSD_{new} = PSD_{old} * (a / sin(el) + c)^2

    NOTE: since this operator generates a new noise model for all detectors, you
    should specify all detectors you intend to use downstream when calling exec().

    If the view trait is not specified, then this operator will use the same data
    view as the detector pointing operator when computing the pointing matrix pixels
    and weights.

    If the output model is not specified, then the input is modified in place.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    times = Unicode(defaults.times, help="Observation shared key for timestamps")

    noise_model = Unicode(
        "noise_model", help="The observation key containing the input noise model"
    )

    out_model = Unicode(
        None, allow_none=True, help="Create a new noise model with this name"
    )

    detector_pointing = Instance(
        klass=Operator,
        allow_none=True,
        help="Operator that translates boresight Az / El pointing into detector frame",
    )

    view = Unicode(
        None,
        allow_none=True,
        help="Use this view of the data in all observations.  "
        "Use 'middle' if the middle 10 seconds of each observation is enough to "
        "determine the effective observing elevation",
    )

    noise_a = Float(
        None,
        allow_none=True,
        help="Parameter 'a' in (a / sin(el) + c).  "
        "If not set, look for one in the Focalplane.",
    )

    noise_c = Float(
        None,
        allow_none=True,
        help="Parameter 'c' in (a / sin(el) + c).  "
        "If not set, look for one in the Focalplane.",
    )

    pwv_a0 = Float(
        None,
        allow_none=True,
        help="Parameter 'a0' in (a0 + pwv * a1 + pwv ** 2 * a2). "
        " If not set, look for one in the Focalplane.",
    )

    pwv_a1 = Float(
        None,
        allow_none=True,
        help="Parameter 'a1' in (a0 + pwv * a1 + pwv ** 2 * a2). "
        " If not set, look for one in the Focalplane.",
    )

    pwv_a2 = Float(
        None,
        allow_none=True,
        help="Parameter 'a2' in (a0 + pwv * a1 + pwv ** 2 * a2). "
        " If not set, look for one in the Focalplane.",
    )

    modulate_pwv = Bool(False, help="If True, modulate the NET based on PWV")

    extra_factor = Float(
        None,
        allow_none=True,
        help="Extra multiplier to the NET scaling",
    )

    @traitlets.validate("detector_pointing")
    def _check_detector_pointing(self, proposal):
        """Validate that detector_pointing is an Operator with the expected traits."""
        detpointing = proposal["value"]
        if detpointing is not None:
            if not isinstance(detpointing, Operator):
                raise traitlets.TraitError(
                    "detector_pointing should be an Operator instance"
                )
            # Check that this operator has the traits we expect
            for trt in [
                "view",
                "boresight",
                "shared_flags",
                "shared_flag_mask",
                "quats",
                "coord_in",
                "coord_out",
            ]:
                if not detpointing.has_trait(trt):
                    msg = f"detector_pointing operator should have a '{trt}' trait"
                    raise traitlets.TraitError(msg)
        return detpointing

    def __init__(self, **kwargs):
        """Construct the operator and initialize the statistics accumulators."""
        super().__init__(**kwargs)
        # Per-detector scale factors and noise weights accumulated across
        # all calls to _exec(); gathered and reported in _finalize().
        self.net_factors = []
        self.total_factors = []
        self.weights_in = []
        self.weights_out = []
        self.rates = []

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Scale the noise PSDs of each observation by the elevation-dependent factor.

        Args:
            data:  The toast Data container to process.
            detectors:  Must be None; this operator processes all detectors at once.

        Raises:
            RuntimeError:  If detector_pointing is unset, a detector subset is
                requested, the observation is not distributed by detector, or
                the input noise model is missing.

        """
        log = Logger.get()

        if self.detector_pointing is None:
            msg = "You must set the detector_pointing trait before calling exec()"
            log.error(msg)
            raise RuntimeError(msg)

        if detectors is not None:
            msg = "You must run this operator on all detectors at once"
            log.error(msg)
            raise RuntimeError(msg)

        for obs in data.obs:
            if not obs.is_distributed_by_detector:
                msg = "Observation data must be distributed by detector, not samples"
                log.error(msg)
                raise RuntimeError(msg)
            obs_data = data.select(obs_uid=obs.uid)
            focalplane = obs.telescope.focalplane

            if self.view == "middle" and self.view not in obs.intervals:
                # Create a view that is just 10 seconds in the middle
                length = 10.0  # in seconds
                times = obs.shared[self.times]
                t_start = times[0]
                t_stop = times[-1]
                t_middle = np.mean([t_start, t_stop])
                t_start = max(t_start, t_middle - length / 2)
                t_stop = min(t_stop, t_middle + length / 2)
                obs.intervals[self.view] = IntervalList(
                    timestamps=times, timespans=[(t_start, t_stop)]
                )

            # Check that the noise model exists
            if self.noise_model not in obs or obs[self.noise_model] is None:
                # FIX: these were plain strings, so the placeholders were
                # never interpolated into the error message.
                msg = (
                    f"Noise model {self.noise_model} does not exist in "
                    f"observation {obs.name}"
                )
                raise RuntimeError(msg)

            # Check that the view in the detector pointing operator covers
            # all the samples needed by this operator

            view = self.view
            detector_pointing_view = self.detector_pointing.view
            if view is None:
                # Use the same data view as detector pointing
                view = self.detector_pointing.view
            elif self.detector_pointing.view is not None:
                # Check that our view is fully covered by detector pointing
                intervals = obs.intervals[self.view]
                detector_intervals = obs.intervals[self.detector_pointing.view]
                intersection = detector_intervals & intervals
                if intersection != intervals:
                    msg = (
                        f"view {self.view} is not fully covered by valid "
                        "detector pointing"
                    )
                    raise RuntimeError(msg)
            self.detector_pointing.view = view

            noise = obs[self.noise_model]

            # We will be collectively building the scale factor for all detectors.
            # Allocate arrays for communication.

            local_dets = obs.select_local_detectors(flagmask=defaults.det_mask_invalid)

            local_net_factors = np.zeros(len(local_dets), dtype=np.float64)
            local_tot_factors = np.zeros(len(local_dets), dtype=np.float64)
            local_rates = np.zeros(len(local_dets), dtype=np.float64)
            local_weights_in = list()

            # We are building up a data product (a noise model) which has values for
            # all detectors.  For each detector we need to expand the detector pointing.
            # Since the contributions for all views contribute to the scaling for each
            # detector, we loop over detectors first and then views.

            views = obs.view[view]

            # The flags are common to all detectors, so we compute them once.

            view_flags = list()
            for vw in range(len(views)):
                # Get the flags if needed.  Use the same flags as detector pointing.
                flags = None
                if self.detector_pointing.shared_flags is not None:
                    flags = np.array(
                        views.shared[self.detector_pointing.shared_flags][vw]
                    )
                    flags &= self.detector_pointing.shared_flag_mask
                    # If there are no valid samples, ignore detector flags and
                    # *hope* that we can still get an approximate elevation
                    if np.all(flags != 0):
                        flags = None
                view_flags.append(flags)

            for idet, det in enumerate(local_dets):
                local_rates[idet] = focalplane.sample_rate.to_value(u.Hz)
                local_weights_in.append(noise.detector_weight(det))

                # If both the A and C values are unset, the noise model is not modified.
                if self.noise_a is not None:
                    noise_a = self.noise_a
                    noise_c = self.noise_c
                elif "elevation_noise_a" in focalplane[det].colnames:
                    noise_a = focalplane[det]["elevation_noise_a"]
                    noise_c = focalplane[det]["elevation_noise_c"]
                else:
                    local_net_factors[idet] = 1.0
                    local_tot_factors[idet] = 1.0
                    continue

                if self.modulate_pwv and self.pwv_a0 is not None:
                    pwv_a0 = self.pwv_a0
                    pwv_a1 = self.pwv_a1
                    pwv_a2 = self.pwv_a2
                    modulate_pwv = True
                elif self.modulate_pwv and "pwv_noise_a0" in focalplane[det].colnames:
                    pwv_a0 = focalplane[det]["pwv_noise_a0"]
                    pwv_a1 = focalplane[det]["pwv_noise_a1"]
                    pwv_a2 = focalplane[det]["pwv_noise_a2"]
                    modulate_pwv = True
                else:
                    modulate_pwv = False

                # Compute detector quaternions one detector at a time.
                self.detector_pointing.apply(obs_data, detectors=[det])

                el_view = list()
                for vw in range(len(views)):
                    # Detector elevation
                    theta, _, _ = qa.to_iso_angles(
                        views.detdata[self.detector_pointing.quats][vw][det]
                    )

                    # Apply flags and convert to elevation
                    if view_flags[vw] is None:
                        el_view.append(np.pi / 2 - theta)
                    else:
                        el_view.append(np.pi / 2 - theta[view_flags[vw] == 0])

                el = np.median(np.concatenate(el_view))

                # Compute the scaling factors

                net_factor = noise_a / np.sin(el) + noise_c
                local_net_factors[idet] = net_factor

                if modulate_pwv:
                    pwv = obs.telescope.site.weather.pwv.to_value(u.mm)
                    net_factor *= pwv_a0 + pwv_a1 * pwv + pwv_a2 * pwv**2

                if self.extra_factor is not None:
                    net_factor *= self.extra_factor

                # The PSD is scaled by the square of the NET factor.
                local_tot_factors[idet] = net_factor**2

            # Restore the original detector pointing view
            self.detector_pointing.view = detector_pointing_view

            # Gather the PSD scale factors to the root process of the group
            # for calculating statistics later.
            if obs.comm_row_rank == 0:
                if obs.comm_col_size > 1:
                    all_net_factors = obs.comm_col.gather(local_net_factors, root=0)
                    all_tot_factors = obs.comm_col.gather(local_tot_factors, root=0)
                    all_rates = obs.comm_col.gather(local_rates, root=0)
                    all_weights_in = obs.comm_col.gather(local_weights_in, root=0)
                    if obs.comm_col_rank == 0:
                        for pnet, ptot, prate, pw in zip(
                            all_net_factors, all_tot_factors, all_rates, all_weights_in
                        ):
                            self.net_factors.extend(pnet.tolist())
                            self.total_factors.extend(ptot.tolist())
                            self.rates.extend(prate.tolist())
                            self.weights_in.extend(pw)
                else:
                    self.net_factors.extend(local_net_factors.tolist())
                    self.total_factors.extend(local_tot_factors.tolist())
                    self.rates.extend(local_rates.tolist())
                    self.weights_in.extend(local_weights_in)

            # Create a new base-class noise object with the same PSDs and
            # mixing matrix as the input.  Then modify those values.  If the
            # output name is the same as the input, then delete the input
            # and replace it with the new model.

            nse_keys = noise.keys
            nse_dets = noise.detectors
            nse_freqs = {x: noise.freq(x) for x in nse_keys}
            nse_psds = {x: noise.psd(x) for x in nse_keys}
            nse_indx = {x: noise.index(x) for x in nse_keys}
            out_noise = Noise(
                detectors=nse_dets,
                freqs=nse_freqs,
                psds=nse_psds,
                indices=nse_indx,
                mixmatrix=noise.mixing_matrix,
            )

            # Modify all psds first, since the first call to detector_weight()
            # will trigger the calculation for all detectors.
            for idet, det in enumerate(local_dets):
                out_noise.psd(det)[:] *= local_tot_factors[idet]

            local_weights_out = list()
            for idet, det in enumerate(local_dets):
                local_weights_out.append(out_noise.detector_weight(det))

            if obs.comm_row_rank == 0:
                if obs.comm_col_size > 1:
                    all_weights_out = obs.comm_col.gather(local_weights_out, root=0)
                    if obs.comm_col_rank == 0:
                        for pw in all_weights_out:
                            self.weights_out.extend(pw)
                else:
                    self.weights_out.extend(local_weights_out)

            if self.out_model is None or self.noise_model == self.out_model:
                # We are replacing the input
                del obs[self.noise_model]
                obs[self.noise_model] = out_noise
            else:
                # We are storing this in a new key
                obs[self.out_model] = out_noise
        return

    def _finalize(self, data, **kwargs):
        """Gather scale factors from all groups and log summary statistics."""
        log = Logger.get()
        # Within a process group, all processes have a copy of the same information
        # for all detectors.  To build the global statistics, we just need to gather
        # data from the rank zero of all groups.

        if data.comm.group_rank == 0:
            net_factors = np.array(self.net_factors)
            total_factors = np.array(self.total_factors)
            wt_units = 1.0 / (u.K**2)
            weights_in = np.array([x.to_value(wt_units) for x in self.weights_in])
            weights_out = np.array([x.to_value(wt_units) for x in self.weights_out])
            rates = np.array(self.rates)
            rank_comm = data.comm.comm_group_rank
            if rank_comm is not None:
                net_factors = rank_comm.gather(net_factors)
                total_factors = rank_comm.gather(total_factors)
                weights_in = rank_comm.gather(weights_in)
                weights_out = rank_comm.gather(weights_out)
                rates = rank_comm.gather(rates)
                if rank_comm.rank == 0:
                    net_factors = np.hstack(net_factors)
                    total_factors = np.hstack(total_factors)
                    weights_in = np.hstack(weights_in)
                    weights_out = np.hstack(weights_out)
                    rates = np.hstack(rates)
            if data.comm.world_rank == 0 and len(net_factors) > 0:
                net = net_factors
                tot = total_factors
                # NET in uK * sqrt(s), computed from the noise weights.
                net1 = np.sqrt(1 / weights_in / rates) * 1e6
                net2 = np.sqrt(1 / weights_out / rates) * 1e6
                log.info(
                    f"Elevation noise: \n"
                    f"  NET_factor: \n"
                    f"     min = {np.amin(net):8.3f},    max = {np.amax(net):8.3f}\n"
                    f"    mean = {np.mean(net):8.3f}, median = {np.median(net):8.3f}\n"
                    f"  TOTAL factor: \n"
                    f"     min = {np.amin(tot):8.3f},    max = {np.amax(tot):8.3f}\n"
                    f"    mean = {np.mean(tot):8.3f}, median = {np.median(tot):8.3f}\n"
                    f"  NET_in [uK root(s)]: \n"
                    f"     min = {np.amin(net1):8.1f},    max = {np.amax(net1):8.1f}\n"
                    f"    mean = {np.mean(net1):8.1f}, median = {np.median(net1):8.1f}\n"
                    f"  NET_out: [uK root(s)]\n"
                    f"     min = {np.amin(net2):8.1f},    max = {np.amax(net2):8.1f}\n"
                    f"    mean = {np.mean(net2):8.1f}, median = {np.median(net2):8.1f}\n"
                )
        return

    def _requires(self):
        """Report the inputs needed: detector pointing products plus the noise model."""
        # NOTE(review): this assumes detector_pointing is already set; calling
        # requires() before configuring the operator raises AttributeError.
        req = self.detector_pointing.requires()
        req["meta"].append(self.noise_model)
        if self.view is not None:
            req["intervals"].append(self.view)
        return req

    def _provides(self):
        """Report the data products created by this operator."""
        prov = {
            "meta": list(),
            "shared": list(),
            "detdata": list(),
            "intervals": list(),
        }
        # FIX: the condition was inverted — a new meta product exists only
        # when out_model is set; previously this appended None.
        if self.out_model is not None:
            prov["meta"].append(self.out_model)
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

detector_pointing = Instance(klass=Operator, allow_none=True, help='Operator that translates boresight Az / El pointing into detector frame') class-attribute instance-attribute

extra_factor = Float(None, allow_none=True, help='Extra multiplier to the NET scaling') class-attribute instance-attribute

modulate_pwv = Bool(False, help='If True, modulate the NET based on PWV') class-attribute instance-attribute

net_factors = [] instance-attribute

noise_a = Float(None, allow_none=True, help="Parameter 'a' in (a / sin(el) + c). If not set, look for one in the Focalplane.") class-attribute instance-attribute

noise_c = Float(None, allow_none=True, help="Parameter 'c' in (a / sin(el) + c). If not set, look for one in the Focalplane.") class-attribute instance-attribute

noise_model = Unicode('noise_model', help='The observation key containing the input noise model') class-attribute instance-attribute

out_model = Unicode(None, allow_none=True, help='Create a new noise model with this name') class-attribute instance-attribute

pwv_a0 = Float(None, allow_none=True, help="Parameter 'a0' in (a0 + pwv * a1 + pwv ** 2 * a2). If not set, look for one in the Focalplane.") class-attribute instance-attribute

pwv_a1 = Float(None, allow_none=True, help="Parameter 'a1' in (a0 + pwv * a1 + pwv ** 2 * a2). If not set, look for one in the Focalplane.") class-attribute instance-attribute

pwv_a2 = Float(None, allow_none=True, help="Parameter 'a2' in (a0 + pwv * a1 + pwv ** 2 * a2). If not set, look for one in the Focalplane.") class-attribute instance-attribute

rates = [] instance-attribute

times = Unicode(defaults.times, help='Observation shared key for timestamps') class-attribute instance-attribute

total_factors = [] instance-attribute

view = Unicode(None, allow_none=True, help="Use this view of the data in all observations. Use 'middle' if the middle 10 seconds of each observation is enough to determine the effective observing elevation") class-attribute instance-attribute

weights_in = [] instance-attribute

weights_out = [] instance-attribute

__init__(**kwargs)

Source code in toast/ops/elevation_noise.py
141
142
143
144
145
146
147
def __init__(self, **kwargs):
    """Construct the operator and initialize the statistics accumulators."""
    super().__init__(**kwargs)
    # Per-detector scale factors and noise weights accumulated across
    # all calls to _exec(); gathered and reported in _finalize().
    self.net_factors = []
    self.total_factors = []
    self.weights_in = []
    self.weights_out = []
    self.rates = []

_check_detector_pointing(proposal)

Source code in toast/ops/elevation_noise.py
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
@traitlets.validate("detector_pointing")
def _check_detector_pointing(self, proposal):
    """Validate that detector_pointing is an Operator exposing the expected traits."""
    candidate = proposal["value"]
    if candidate is None:
        return candidate
    if not isinstance(candidate, Operator):
        raise traitlets.TraitError(
            "detector_pointing should be an Operator instance"
        )
    # The pointing operator must expose these traits for _exec() to use.
    expected_traits = (
        "view",
        "boresight",
        "shared_flags",
        "shared_flag_mask",
        "quats",
        "coord_in",
        "coord_out",
    )
    for trt in expected_traits:
        if not candidate.has_trait(trt):
            msg = f"detector_pointing operator should have a '{trt}' trait"
            raise traitlets.TraitError(msg)
    return candidate

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/elevation_noise.py
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Scale each observation's noise model by an elevation-dependent factor.

    For every observation, the median detector elevation over the selected
    view is used to compute a NET scale factor ``a / sin(el) + c`` (optionally
    modulated by PWV and an extra factor), which multiplies the PSDs of a
    copy of the input noise model.  The result is stored either under
    ``out_model`` or in place of ``noise_model``.

    Args:
        data (Data):  The distributed data.
        detectors:  Must be None; this operator processes all detectors.

    Raises:
        RuntimeError:  If detector_pointing is unset, a detector subset is
            requested, data is not detector-distributed, the noise model is
            missing, or the view is not covered by valid pointing.
    """
    log = Logger.get()

    if self.detector_pointing is None:
        msg = "You must set the detector_pointing trait before calling exec()"
        log.error(msg)
        raise RuntimeError(msg)

    if detectors is not None:
        msg = "You must run this operator on all detectors at once"
        log.error(msg)
        raise RuntimeError(msg)

    for obs in data.obs:
        if not obs.is_distributed_by_detector:
            msg = "Observation data must be distributed by detector, not samples"
            log.error(msg)
            raise RuntimeError(msg)
        obs_data = data.select(obs_uid=obs.uid)
        focalplane = obs.telescope.focalplane

        if self.view == "middle" and self.view not in obs.intervals:
            # Create a short (10 second) view in the middle of the observation
            length = 10.0  # in seconds
            times = obs.shared[self.times]
            t_start = times[0]
            t_stop = times[-1]
            t_middle = np.mean([t_start, t_stop])
            t_start = max(t_start, t_middle - length / 2)
            t_stop = min(t_stop, t_middle + length / 2)
            obs.intervals[self.view] = IntervalList(
                timestamps=times, timespans=[(t_start, t_stop)]
            )

        # Check that the noise model exists
        if self.noise_model not in obs or obs[self.noise_model] is None:
            # BUGFIX: these were plain strings, so the braces were emitted
            # literally instead of interpolating the key / observation name.
            msg = (
                f"Noise model {self.noise_model} does not exist in "
                f"observation {obs.name}"
            )
            raise RuntimeError(msg)

        # Check that the view in the detector pointing operator covers
        # all the samples needed by this operator

        view = self.view
        detector_pointing_view = self.detector_pointing.view
        if view is None:
            # Use the same data view as detector pointing
            view = self.detector_pointing.view
        elif self.detector_pointing.view is not None:
            # Check that our view is fully covered by detector pointing
            intervals = obs.intervals[self.view]
            detector_intervals = obs.intervals[self.detector_pointing.view]
            intersection = detector_intervals & intervals
            if intersection != intervals:
                msg = (
                    f"view {self.view} is not fully covered by valid "
                    "detector pointing"
                )
                raise RuntimeError(msg)
        self.detector_pointing.view = view

        noise = obs[self.noise_model]

        # We will be collectively building the scale factor for all detectors.
        # Allocate arrays for communication.

        local_dets = obs.select_local_detectors(flagmask=defaults.det_mask_invalid)

        local_net_factors = np.zeros(len(local_dets), dtype=np.float64)
        local_tot_factors = np.zeros(len(local_dets), dtype=np.float64)
        local_rates = np.zeros(len(local_dets), dtype=np.float64)
        local_weights_in = list()

        # We are building up a data product (a noise model) which has values for
        # all detectors.  For each detector we need to expand the detector pointing.
        # Since the contributions for all views contribute to the scaling for each
        # detector, we loop over detectors first and then views.

        views = obs.view[view]

        # The flags are common to all detectors, so we compute them once.

        view_flags = list()
        for vw in range(len(views)):
            # Get the flags if needed.  Use the same flags as detector pointing.
            flags = None
            if self.detector_pointing.shared_flags is not None:
                flags = np.array(
                    views.shared[self.detector_pointing.shared_flags][vw]
                )
                flags &= self.detector_pointing.shared_flag_mask
                # If there are no valid samples, ignore detector flags and
                # *hope* that we can still get an approximate elevation
                if np.all(flags != 0):
                    flags = None
            view_flags.append(flags)

        for idet, det in enumerate(local_dets):
            local_rates[idet] = focalplane.sample_rate.to_value(u.Hz)
            local_weights_in.append(noise.detector_weight(det))

            # If both the A and C values are unset, the noise model is not modified.
            if self.noise_a is not None:
                noise_a = self.noise_a
                noise_c = self.noise_c
            elif "elevation_noise_a" in focalplane[det].colnames:
                noise_a = focalplane[det]["elevation_noise_a"]
                noise_c = focalplane[det]["elevation_noise_c"]
            else:
                local_net_factors[idet] = 1.0
                local_tot_factors[idet] = 1.0
                continue

            if self.modulate_pwv and self.pwv_a0 is not None:
                pwv_a0 = self.pwv_a0
                pwv_a1 = self.pwv_a1
                pwv_a2 = self.pwv_a2
                modulate_pwv = True
            elif self.modulate_pwv and "pwv_noise_a0" in focalplane[det].colnames:
                pwv_a0 = focalplane[det]["pwv_noise_a0"]
                pwv_a1 = focalplane[det]["pwv_noise_a1"]
                pwv_a2 = focalplane[det]["pwv_noise_a2"]
                modulate_pwv = True
            else:
                modulate_pwv = False

            # Compute detector quaternions one detector at a time.
            self.detector_pointing.apply(obs_data, detectors=[det])

            el_view = list()
            for vw in range(len(views)):
                # Detector elevation
                theta, _, _ = qa.to_iso_angles(
                    views.detdata[self.detector_pointing.quats][vw][det]
                )

                # Apply flags and convert to elevation
                if view_flags[vw] is None:
                    el_view.append(np.pi / 2 - theta)
                else:
                    el_view.append(np.pi / 2 - theta[view_flags[vw] == 0])

            el = np.median(np.concatenate(el_view))

            # Compute the scaling factors

            net_factor = noise_a / np.sin(el) + noise_c
            local_net_factors[idet] = net_factor

            if modulate_pwv:
                pwv = obs.telescope.site.weather.pwv.to_value(u.mm)
                net_factor *= pwv_a0 + pwv_a1 * pwv + pwv_a2 * pwv**2

            if self.extra_factor is not None:
                net_factor *= self.extra_factor

            local_tot_factors[idet] = net_factor**2

        # Restore the original detector pointing view
        self.detector_pointing.view = detector_pointing_view

        # Gather the PSD scale factors to the root process of the group
        # for calculating statistics later.
        if obs.comm_row_rank == 0:
            if obs.comm_col_size > 1:
                all_net_factors = obs.comm_col.gather(local_net_factors, root=0)
                all_tot_factors = obs.comm_col.gather(local_tot_factors, root=0)
                all_rates = obs.comm_col.gather(local_rates, root=0)
                all_weights_in = obs.comm_col.gather(local_weights_in, root=0)
                if obs.comm_col_rank == 0:
                    for pnet, ptot, prate, pw in zip(
                        all_net_factors, all_tot_factors, all_rates, all_weights_in
                    ):
                        self.net_factors.extend(pnet.tolist())
                        self.total_factors.extend(ptot.tolist())
                        self.rates.extend(prate.tolist())
                        self.weights_in.extend(pw)
            else:
                self.net_factors.extend(local_net_factors.tolist())
                self.total_factors.extend(local_tot_factors.tolist())
                self.rates.extend(local_rates.tolist())
                self.weights_in.extend(local_weights_in)

        # Create a new base-class noise object with the same PSDs and
        # mixing matrix as the input.  Then modify those values.  If the
        # output name is the same as the input, then delete the input
        # and replace it with the new model.

        nse_keys = noise.keys
        nse_dets = noise.detectors
        nse_freqs = {x: noise.freq(x) for x in nse_keys}
        nse_psds = {x: noise.psd(x) for x in nse_keys}
        nse_indx = {x: noise.index(x) for x in nse_keys}
        out_noise = Noise(
            detectors=nse_dets,
            freqs=nse_freqs,
            psds=nse_psds,
            indices=nse_indx,
            mixmatrix=noise.mixing_matrix,
        )

        # Modify all psds first, since the first call to detector_weight()
        # will trigger the calculation for all detectors.
        for idet, det in enumerate(local_dets):
            out_noise.psd(det)[:] *= local_tot_factors[idet]

        local_weights_out = list()
        for idet, det in enumerate(local_dets):
            local_weights_out.append(out_noise.detector_weight(det))

        if obs.comm_row_rank == 0:
            if obs.comm_col_size > 1:
                all_weights_out = obs.comm_col.gather(local_weights_out, root=0)
                if obs.comm_col_rank == 0:
                    for pw in all_weights_out:
                        self.weights_out.extend(pw)
            else:
                self.weights_out.extend(local_weights_out)

        if self.out_model is None or self.noise_model == self.out_model:
            # We are replacing the input
            del obs[self.noise_model]
            obs[self.noise_model] = out_noise
        else:
            # We are storing this in a new key
            obs[self.out_model] = out_noise
    return

_finalize(data, **kwargs)

Source code in toast/ops/elevation_noise.py
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
def _finalize(self, data, **kwargs):
    log = Logger.get()
    # Within a process group, all processes have a copy of the same information
    # for all detectors.  To build the global statistics, we just need to gather
    # data from the rank zero of all groups.

    if data.comm.group_rank == 0:
        net_factors = np.array(self.net_factors)
        total_factors = np.array(self.total_factors)
        wt_units = 1.0 / (u.K**2)
        weights_in = np.array([x.to_value(wt_units) for x in self.weights_in])
        weights_out = np.array([x.to_value(wt_units) for x in self.weights_out])
        rates = np.array(self.rates)
        rank_comm = data.comm.comm_group_rank
        if rank_comm is not None:
            net_factors = rank_comm.gather(net_factors)
            total_factors = rank_comm.gather(total_factors)
            weights_in = rank_comm.gather(weights_in)
            weights_out = rank_comm.gather(weights_out)
            rates = rank_comm.gather(rates)
            if rank_comm.rank == 0:
                net_factors = np.hstack(net_factors)
                total_factors = np.hstack(total_factors)
                weights_in = np.hstack(weights_in)
                weights_out = np.hstack(weights_out)
                rates = np.hstack(rates)
        if data.comm.world_rank == 0 and len(net_factors) > 0:
            net = net_factors
            tot = total_factors
            net1 = np.sqrt(1 / weights_in / rates) * 1e6
            net2 = np.sqrt(1 / weights_out / rates) * 1e6
            log.info(
                f"Elevation noise: \n"
                f"  NET_factor: \n"
                f"     min = {np.amin(net):8.3f},    max = {np.amax(net):8.3f}\n"
                f"    mean = {np.mean(net):8.3f}, median = {np.median(net):8.3f}\n"
                f"  TOTAL factor: \n"
                f"     min = {np.amin(tot):8.3f},    max = {np.amax(tot):8.3f}\n"
                f"    mean = {np.mean(tot):8.3f}, median = {np.median(tot):8.3f}\n"
                f"  NET_in [uK root(s)]: \n"
                f"     min = {np.amin(net1):8.1f},    max = {np.amax(net1):8.1f}\n"
                f"    mean = {np.mean(net1):8.1f}, median = {np.median(net1):8.1f}\n"
                f"  NET_out: [uK root(s)]\n"
                f"     min = {np.amin(net2):8.1f},    max = {np.amax(net2):8.1f}\n"
                f"    mean = {np.mean(net2):8.1f}, median = {np.median(net2):8.1f}\n"
            )
    return

_provides()

Source code in toast/ops/elevation_noise.py
435
436
437
438
439
440
441
442
443
444
def _provides(self):
    prov = {
        "meta": list(),
        "shared": list(),
        "detdata": list(),
        "intervals": list(),
    }
    if self.out_model is None:
        prov["meta"].append(self.out_model)
    return prov

_requires()

Source code in toast/ops/elevation_noise.py
428
429
430
431
432
433
def _requires(self):
    req = self.detector_pointing.requires()
    req["meta"].append(self.noise_model)
    if self.view is not None:
        req["intervals"].append(self.view)
    return req

toast.ops.SimNoise

Bases: Operator

Operator which generates noise timestreams.

This passes through each observation and every process generates data for its assigned samples. The session unique ID is used in the random number generation.

This operator intentionally does not provide a "view" trait. To avoid discontinuities, the full observation must be simulated regardless of any data views that will be used for subsequent analysis.

Source code in toast/ops/sim_tod_noise.py
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
@trait_docs
class SimNoise(Operator):
    """Operator which generates noise timestreams.

    This passes through each observation and every process generates data
    for its assigned samples.  The session unique ID (together with the
    telescope UID and per-stream noise indices) is used in the random
    number generation, so realizations are reproducible.

    This operator intentionally does not provide a "view" trait.  To avoid
    discontinuities, the full observation must be simulated regardless of any data
    views that will be used for subsequent analysis.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    noise_model = Unicode(
        "noise_model", help="Observation key containing the noise model"
    )

    realization = Int(0, help="The noise realization index")

    component = Int(0, help="The noise component index")

    times = Unicode(defaults.times, help="Observation shared key for timestamps")

    det_data = Unicode(
        defaults.det_data,
        help="Observation detdata key for accumulating noise timestreams",
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    serial = Bool(True, help="Use legacy serial implementation instead of batched")

    @traitlets.validate("realization")
    def _check_realization(self, proposal):
        # Realization indices seed the RNG stream and must be non-negative.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("realization index must be positive")
        return check

    @traitlets.validate("component")
    def _check_component(self, proposal):
        # Component indices seed the RNG stream and must be non-negative.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("component index must be positive")
        return check

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Oversampling factor passed to the noise timestream simulation.
        self._oversample = 2

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Simulate noise and accumulate it into the detector data.

        For each observation, draws noise timestreams from the PSDs in the
        configured noise model and accumulates them (weighted by the mixing
        matrix) into ``det_data``.
        """
        log = Logger.get()

        for ob in data.obs:
            # Get the detectors we are using for this observation
            dets = ob.select_local_detectors(detectors)
            if len(dets) == 0:
                # Nothing to do for this observation
                continue

            # Unique session ID
            sindx = ob.session.uid

            # Telescope UID
            telescope = ob.telescope.uid

            if self.noise_model not in ob:
                msg = "Observation does not contain noise model key '{}'".format(
                    self.noise_model
                )
                log.error(msg)
                raise KeyError(msg)

            nse = ob[self.noise_model]

            # Eventually we'll redistribute, to allow long correlations...
            if not ob.is_distributed_by_detector:
                msg = "Noise simulation for process grids with multiple ranks in the sample direction not implemented"
                log.error(msg)
                raise NotImplementedError(msg)

            # The previous code verified that a single process has whole
            # detectors within the observation...

            # Make sure correct output exists
            exists = ob.detdata.ensure(
                self.det_data, detectors=dets, create_units=self.det_data_units
            )

            # The units of the output timestream
            data_units = ob.detdata[self.det_data].units

            # The target units of the PSD needed to produce the timestream units
            sim_units = data_units**2 * u.second

            # Get the sample rate from the data.  We also have nominal sample rates
            # from the noise model and also from the focalplane.  Perhaps we should add
            # a check here that they are all consistent.
            (rate, dt, dt_min, dt_max, dt_std) = rate_from_times(
                ob.shared[self.times].data
            )

            if self.serial:
                # Original serial implementation (for testing / comparison)
                for key in nse.all_keys_for_dets(dets):
                    # Simulate the noise matching this key
                    nsedata = sim_noise_timestream(
                        realization=self.realization,
                        telescope=telescope,
                        component=self.component,
                        sindx=sindx,
                        detindx=nse.index(key),
                        rate=rate,
                        firstsamp=ob.local_index_offset,
                        samples=ob.n_local_samples,
                        oversample=self._oversample,
                        freq=nse.freq(key).to_value(u.Hz),
                        psd=nse.psd(key).to_value(sim_units),
                        py=False,
                    )

                    # Add the noise to all detectors that have nonzero weights
                    for det in dets:
                        weight = nse.weight(det, key)
                        if weight == 0:
                            continue
                        ob.detdata[self.det_data][det] += weight * nsedata.array()

                    nsedata.clear()
                    del nsedata

                # Release the work space allocated in the FFT plan store.
                store = FFTPlanReal1DStore.get()
                store.clear()
            else:
                # Build up the list of noise stream indices and verify that the
                # frequency data for all psds is consistent.
                strm_names = list()
                freq_zero = nse.freq(nse.keys[0])
                for ikey, key in enumerate(nse.keys):
                    # Skip streams with zero total weight for our detectors.
                    weight = 0.0
                    for det in dets:
                        weight += np.abs(nse.weight(det, key))
                    if weight == 0:
                        continue
                    test_freq = nse.freq(key)
                    if (
                        len(test_freq) != len(freq_zero)
                        or test_freq[0] != freq_zero[0]
                        or test_freq[-1] != freq_zero[-1]
                    ):
                        msg = "All psds must have the same frequency values"
                        log.error(msg)
                        raise RuntimeError(msg)
                    strm_names.append(key)

                freq = AlignedF64(len(freq_zero))
                freq[:] = freq_zero.to_value(u.Hz)

                strmindices = np.array(
                    [nse.index(x) for x in strm_names], dtype=np.uint64
                )

                # Pack all PSDs into one aligned buffer for the batched call.
                psdbuf = AlignedF64(len(freq_zero) * len(strmindices))
                psds = psdbuf.array().reshape((len(strmindices), len(freq_zero)))
                for ikey, key in enumerate(strm_names):
                    psds[ikey][:] = nse.psd(key).to_value(sim_units)

                noisebuf = AlignedF64(ob.n_local_samples * len(strmindices))
                noise = noisebuf.array().reshape((len(strmindices), ob.n_local_samples))

                tod_sim_noise_timestream_batch(
                    self.realization,
                    telescope,
                    self.component,
                    sindx,
                    rate,
                    ob.local_index_offset,
                    self._oversample,
                    strmindices,
                    freq,
                    psds,
                    noise,
                )

                # Explicitly free the aligned buffers as soon as possible.
                del psds
                psdbuf.clear()
                del psdbuf

                freq.clear()
                del freq

                # Add the noise to all detectors that have nonzero weights
                for ikey, key in enumerate(strm_names):
                    for det in dets:
                        weight = nse.weight(det, key)
                        if weight == 0:
                            continue
                        ob.detdata[self.det_data][det] += weight * noise[ikey]

                del noise
                noisebuf.clear()
                del noisebuf

                # Save memory by clearing the fft plans
                store = FFTPlanReal1DStore.get()
                store.clear()

        return

    def _finalize(self, data, **kwargs):
        # No post-processing is needed; all work happens in _exec().
        return

    def _requires(self):
        # Inputs: the noise model, timestamps, and the detector data to
        # accumulate into.
        return {
            "meta": [
                self.noise_model,
            ],
            "shared": [
                self.times,
            ],
            "detdata": [
                self.det_data,
            ],
        }

    def _provides(self):
        # Output: the detector data containing the accumulated noise.
        return {
            "detdata": [
                self.det_data,
            ]
        }

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

_oversample = 2 instance-attribute

component = Int(0, help='The noise component index') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, help='Observation detdata key for accumulating noise timestreams') class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

noise_model = Unicode('noise_model', help='Observation key containing the noise model') class-attribute instance-attribute

realization = Int(0, help='The noise realization index') class-attribute instance-attribute

serial = Bool(True, help='Use legacy serial implementation instead of batched') class-attribute instance-attribute

times = Unicode(defaults.times, help='Observation shared key for timestamps') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/sim_tod_noise.py
244
245
246
def __init__(self, **kwargs):
    super().__init__(**kwargs)
    self._oversample = 2

_check_component(proposal)

Source code in toast/ops/sim_tod_noise.py
237
238
239
240
241
242
@traitlets.validate("component")
def _check_component(self, proposal):
    """Reject negative noise component indices."""
    value = proposal["value"]
    if value < 0:
        raise traitlets.TraitError("component index must be positive")
    return value

_check_realization(proposal)

Source code in toast/ops/sim_tod_noise.py
230
231
232
233
234
235
@traitlets.validate("realization")
def _check_realization(self, proposal):
    """Reject negative noise realization indices."""
    value = proposal["value"]
    if value < 0:
        raise traitlets.TraitError("realization index must be positive")
    return value

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/sim_tod_noise.py
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Simulate noise and accumulate it into the detector data.

    For each observation, draws noise timestreams from the PSDs in the
    configured noise model and accumulates them (weighted by the noise
    mixing matrix) into the ``det_data`` field.  Either a serial per-stream
    path or a batched path is used depending on the ``serial`` trait.
    """
    log = Logger.get()

    for ob in data.obs:
        # Get the detectors we are using for this observation
        dets = ob.select_local_detectors(detectors)
        if len(dets) == 0:
            # Nothing to do for this observation
            continue

        # Unique session ID
        sindx = ob.session.uid

        # Telescope UID
        telescope = ob.telescope.uid

        if self.noise_model not in ob:
            msg = "Observation does not contain noise model key '{}'".format(
                self.noise_model
            )
            log.error(msg)
            raise KeyError(msg)

        nse = ob[self.noise_model]

        # Eventually we'll redistribute, to allow long correlations...
        if not ob.is_distributed_by_detector:
            msg = "Noise simulation for process grids with multiple ranks in the sample direction not implemented"
            log.error(msg)
            raise NotImplementedError(msg)

        # The previous code verified that a single process has whole
        # detectors within the observation...

        # Make sure correct output exists
        exists = ob.detdata.ensure(
            self.det_data, detectors=dets, create_units=self.det_data_units
        )

        # The units of the output timestream
        data_units = ob.detdata[self.det_data].units

        # The target units of the PSD needed to produce the timestream units
        sim_units = data_units**2 * u.second

        # Get the sample rate from the data.  We also have nominal sample rates
        # from the noise model and also from the focalplane.  Perhaps we should add
        # a check here that they are all consistent.
        (rate, dt, dt_min, dt_max, dt_std) = rate_from_times(
            ob.shared[self.times].data
        )

        if self.serial:
            # Original serial implementation (for testing / comparison)
            for key in nse.all_keys_for_dets(dets):
                # Simulate the noise matching this key
                nsedata = sim_noise_timestream(
                    realization=self.realization,
                    telescope=telescope,
                    component=self.component,
                    sindx=sindx,
                    detindx=nse.index(key),
                    rate=rate,
                    firstsamp=ob.local_index_offset,
                    samples=ob.n_local_samples,
                    oversample=self._oversample,
                    freq=nse.freq(key).to_value(u.Hz),
                    psd=nse.psd(key).to_value(sim_units),
                    py=False,
                )

                # Add the noise to all detectors that have nonzero weights
                for det in dets:
                    weight = nse.weight(det, key)
                    if weight == 0:
                        continue
                    ob.detdata[self.det_data][det] += weight * nsedata.array()

                nsedata.clear()
                del nsedata

            # Release the work space allocated in the FFT plan store.
            store = FFTPlanReal1DStore.get()
            store.clear()
        else:
            # Build up the list of noise stream indices and verify that the
            # frequency data for all psds is consistent.
            strm_names = list()
            freq_zero = nse.freq(nse.keys[0])
            for ikey, key in enumerate(nse.keys):
                # Skip streams with zero total weight for our detectors.
                weight = 0.0
                for det in dets:
                    weight += np.abs(nse.weight(det, key))
                if weight == 0:
                    continue
                test_freq = nse.freq(key)
                if (
                    len(test_freq) != len(freq_zero)
                    or test_freq[0] != freq_zero[0]
                    or test_freq[-1] != freq_zero[-1]
                ):
                    msg = "All psds must have the same frequency values"
                    log.error(msg)
                    raise RuntimeError(msg)
                strm_names.append(key)

            freq = AlignedF64(len(freq_zero))
            freq[:] = freq_zero.to_value(u.Hz)

            strmindices = np.array(
                [nse.index(x) for x in strm_names], dtype=np.uint64
            )

            # Pack all PSDs into one aligned buffer for the batched call.
            psdbuf = AlignedF64(len(freq_zero) * len(strmindices))
            psds = psdbuf.array().reshape((len(strmindices), len(freq_zero)))
            for ikey, key in enumerate(strm_names):
                psds[ikey][:] = nse.psd(key).to_value(sim_units)

            noisebuf = AlignedF64(ob.n_local_samples * len(strmindices))
            noise = noisebuf.array().reshape((len(strmindices), ob.n_local_samples))

            tod_sim_noise_timestream_batch(
                self.realization,
                telescope,
                self.component,
                sindx,
                rate,
                ob.local_index_offset,
                self._oversample,
                strmindices,
                freq,
                psds,
                noise,
            )

            # Explicitly free the aligned buffers as soon as possible.
            del psds
            psdbuf.clear()
            del psdbuf

            freq.clear()
            del freq

            # Add the noise to all detectors that have nonzero weights
            for ikey, key in enumerate(strm_names):
                for det in dets:
                    weight = nse.weight(det, key)
                    if weight == 0:
                        continue
                    ob.detdata[self.det_data][det] += weight * noise[ikey]

            del noise
            noisebuf.clear()
            del noisebuf

            # Save memory by clearing the fft plans
            store = FFTPlanReal1DStore.get()
            store.clear()

    return

_finalize(data, **kwargs)

Source code in toast/ops/sim_tod_noise.py
409
410
def _finalize(self, data, **kwargs):
    return

_provides()

Source code in toast/ops/sim_tod_noise.py
425
426
427
428
429
430
def _provides(self):
    return {
        "detdata": [
            self.det_data,
        ]
    }

_requires()

Source code in toast/ops/sim_tod_noise.py
412
413
414
415
416
417
418
419
420
421
422
423
def _requires(self):
    return {
        "meta": [
            self.noise_model,
        ],
        "shared": [
            self.times,
        ],
        "detdata": [
            self.det_data,
        ],
    }

toast.ops.CommonModeNoise

Bases: Operator

Modify noise model to include common modes

If the output model is not specified, then the input is modified in place.

Source code in toast/ops/common_mode_noise.py
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
@trait_docs
class CommonModeNoise(Operator):
    """Modify noise model to include common modes.

    For each group of detectors (grouped by the value of `focalplane_key`,
    or a single group of all/`detset` detectors when that trait is None), a
    new 1/f-style PSD is appended to the noise model and every detector in
    the group is coupled to it with a randomized coupling strength recorded
    in the mixing matrix.

    If the output model is not specified, then the input is modified in place.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    # Fixed: help text previously duplicated the `component` description.
    realization = Int(0, help="The simulation realization index")

    component = Int(0, help="The noise component index")

    noise_model = Unicode(
        "noise_model", help="The observation key containing the input noise model"
    )

    out_model = Unicode(
        None, allow_none=True, help="Create a new noise model with this name"
    )

    focalplane_key = Unicode(
        None,
        allow_none=True,
        help="Detectors sharing the focalplane key will have the same common mode",
    )

    detset = List(
        [],
        help="List of detectors to add the common mode to.  "
        "Only used if `focalplane_key` is None",
    )

    fmin = Quantity(
        None,
        allow_none=True,
        help="Minimum frequency of the common mode 1/f spectrum",
    )

    fknee = Quantity(
        None,
        allow_none=True,
        help="Knee frequency of the common mode 1/f spectrum",
    )

    alpha = Float(
        None,
        allow_none=True,
        help="Spectral slope of the common mode 1/f spectrum",
    )

    NET = Quantity(
        None,
        allow_none=True,
        help="NET (white noise level) of the common mode",
    )

    coupling_strength_center = Float(
        1,
        help="Mean coupling strength between the detectors and the common mode",
    )

    coupling_strength_width = Float(
        0,
        help="Width of the coupling strength distribution "
        "about `coupling_strength_center`",
    )

    static_coupling = Bool(
        False,
        help="If True, coupling to the common mode is not randomized over "
        "observations and realizations",
    )

    def __init__(self, **kwargs):
        # Trait values are parsed and applied by the Operator base class.
        super().__init__(**kwargs)

    def _group_detectors(self, obs, focalplane):
        """Return a dict mapping focalplane key value to a list of detectors.

        When `focalplane_key` is None, all detectors (optionally restricted
        to `detset`) form a single group under the key None.

        Raises:
            RuntimeError: if `focalplane_key` is not a focalplane column.
        """
        dets_by_key = {}
        if self.focalplane_key is None:
            dets_by_key[None] = []
            for det in obs.all_detectors:
                if len(self.detset) != 0 and det not in self.detset:
                    continue
                dets_by_key[None].append(det)
        else:
            if self.focalplane_key not in focalplane.detector_data.colnames:
                msg = f"Focalplane does not have column for '{self.focalplane_key}'.  "
                msg += f"Available columns are {focalplane.detector_data.colnames}"
                raise RuntimeError(msg)
            for det in obs.all_detectors:
                key = focalplane[det][self.focalplane_key]
                if key not in dets_by_key:
                    dets_by_key[key] = []
                dets_by_key[key].append(det)
        return dets_by_key

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Append common mode PSDs and couplings to each observation's model."""
        log = Logger.get()

        # All of the spectral parameters are required.
        for trait in ("fmin", "fknee", "alpha", "NET"):
            if getattr(self, trait) is None:
                msg = f"You must set the '{trait}' trait before calling exec()"
                raise RuntimeError(msg)

        if detectors is not None:
            msg = "You must run this operator on all detectors at once"
            log.error(msg)
            raise RuntimeError(msg)

        for obs in data.obs:
            if not obs.is_distributed_by_detector:
                msg = "Observation data must be distributed by detector, not samples"
                log.error(msg)
                raise RuntimeError(msg)
            focalplane = obs.telescope.focalplane

            # Check that the noise model exists
            if self.noise_model not in obs:
                msg = f"Noise model {self.noise_model} does not exist in "
                msg += f"observation {obs.name}"
                raise RuntimeError(msg)

            noise = obs[self.noise_model]
            # The noise simulation tools require frequencies to agree
            freqs = noise.freq(noise.keys[0]).to_value(u.Hz)

            # Find the unique values of focalplane keys
            dets_by_key = self._group_detectors(obs, focalplane)

            # Create a new base-class noise object with the same PSDs and
            # mixing matrix as the input.  Then modify those values.  If the
            # output name is the same as the input, then delete the input
            # and replace it with the new model.

            # Copy the key list so appending the new streams below cannot
            # mutate the input model's internal state.
            nse_keys = list(noise.keys)
            nse_dets = noise.detectors
            nse_freqs = {x: noise.freq(x) for x in nse_keys}
            nse_psds = {x: noise.psd(x) for x in nse_keys}
            nse_indx = {x: noise.index(x) for x in nse_keys}
            # NOTE(review): this is mutated in place below; if `mixing_matrix`
            # is a live reference into the input model, the input is modified
            # even when writing the result to a separate `out_model` key —
            # confirm whether `Noise.mixing_matrix` returns a copy.
            mixing_matrix = noise.mixing_matrix

            # Add the common mode noise PSDs

            fmin = self.fmin.to_value(u.Hz)
            fknee = self.fknee.to_value(u.Hz)
            alpha = self.alpha
            net = self.NET

            if self.static_coupling:
                # Fixed RNG inputs -> couplings reproducible across
                # observations and realizations.
                obs_id = 0
                realization = 0
            else:
                obs_id = obs.uid
                realization = self.realization

            for key, dets in dets_by_key.items():
                if key is None:
                    noise_key = f"{self.name}_{self.component}"
                else:
                    noise_key = f"{self.name}_{self.component}_{key}"
                mixing_matrix[noise_key] = {}
                noise_uid = name_UID(noise_key)
                nse_keys.append(noise_key)
                nse_freqs[noise_key] = freqs * u.Hz
                # 1/f model: NET^2 * (f^alpha + fknee^alpha) / (f^alpha + fmin^alpha)
                nse_psds[noise_key] = net**2 * (
                    (freqs**alpha + fknee**alpha) / (freqs**alpha + fmin**alpha)
                )
                nse_indx[noise_key] = noise_uid

                # Draw coupling strengths and record them in the mixing matrix
                for det in dets:
                    key1 = noise_uid + obs.telescope.uid * 3956215
                    key2 = obs_id
                    counter1 = realization
                    counter2 = focalplane[det]["uid"]
                    gaussian = rng.random(
                        1,
                        sampler="gaussian",
                        key=(key1, key2),
                        counter=(counter1, counter2),
                    )[0]
                    coupling = (
                        self.coupling_strength_center
                        + gaussian * self.coupling_strength_width
                    )
                    mixing_matrix[det][noise_key] = coupling

            out_noise = Noise(
                detectors=nse_dets,
                freqs=nse_freqs,
                psds=nse_psds,
                indices=nse_indx,
                mixmatrix=mixing_matrix,
            )

            if self.out_model is None or self.noise_model == self.out_model:
                # We are replacing the input
                del obs[self.noise_model]
                obs[self.noise_model] = out_noise
            else:
                # We are storing this in a new key
                obs[self.out_model] = out_noise
        return

    def _finalize(self, data, **kwargs):
        # Fixed: a duplicated, unreachable `return` was removed here.
        return

    def _requires(self):
        # Requirement values are lists, matching the other operators.
        req = {"meta": [self.noise_model]}
        return req

    def _provides(self):
        prov = {
            "meta": list(),
            "shared": list(),
            "detdata": list(),
            "intervals": list(),
        }
        # Bug fix: previously the condition was inverted (`is None`), which
        # appended None to the provides list when no output model was set.
        if self.out_model is not None:
            prov["meta"].append(self.out_model)
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

NET = Quantity(None, allow_none=True, help='') class-attribute instance-attribute

alpha = Float(None, allow_none=True, help='') class-attribute instance-attribute

component = Int(0, help='The noise component index') class-attribute instance-attribute

coupling_strength_center = Float(1, help='Mean coupling strength between the detectors and the common mode') class-attribute instance-attribute

coupling_strength_width = Float(0, help='Width of the coupling strength distribution about `coupling_strength_center`') class-attribute instance-attribute

detset = List([], help='List of detectors to add the common mode to. Only used if `focalplane_key` is None') class-attribute instance-attribute

fknee = Quantity(None, allow_none=True, help='') class-attribute instance-attribute

fmin = Quantity(None, allow_none=True, help='') class-attribute instance-attribute

focalplane_key = Unicode(None, allow_none=True, help='Detectors sharing the focalplane key will have the same common mode') class-attribute instance-attribute

noise_model = Unicode('noise_model', help='The observation key containing the input noise model') class-attribute instance-attribute

out_model = Unicode(None, allow_none=True, help='Create a new noise model with this name') class-attribute instance-attribute

realization = Int(0, help='The noise component index') class-attribute instance-attribute

static_coupling = Bool(False, help='If True, coupling to the common mode is not randomized over observations and realizations') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/common_mode_noise.py
 99
100
def __init__(self, **kwargs):
    # Trait values passed as keyword arguments are parsed and applied by
    # the Operator base class constructor.
    super().__init__(**kwargs)

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/common_mode_noise.py
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Append a common mode PSD and detector couplings to each observation's
    noise model.

    Args:
        data: The distributed data container holding the observations.
        detectors: Must be None; this operator processes all detectors at
            once.

    Raises:
        RuntimeError: if a spectral trait is unset, a detector subset is
            requested, the data is not detector-distributed, or the input
            noise model / focalplane column is missing.
    """
    log = Logger.get()

    # All four spectral parameters of the common mode PSD are required.
    for trait in ("fmin", "fknee", "alpha", "NET"):
        if getattr(self, trait) is None:
            msg = f"You must set the '{trait}' trait before calling exec()"
            raise RuntimeError(msg)

    if detectors is not None:
        msg = "You must run this operator on all detectors at once"
        log.error(msg)
        raise RuntimeError(msg)

    for obs in data.obs:
        if not obs.is_distributed_by_detector:
            msg = "Observation data must be distributed by detector, not samples"
            log.error(msg)
            raise RuntimeError(msg)
        focalplane = obs.telescope.focalplane
        # NOTE(review): fsample is never used below.
        fsample = focalplane.sample_rate.to_value(u.Hz)

        # Check that the noise model exists
        if self.noise_model not in obs:
            msg = f"Noise model {self.noise_model} does not exist in "
            msg += f"observation {obs.name}"
            raise RuntimeError(msg)

        noise = obs[self.noise_model]
        # The noise simulation tools require frequencies to agree
        freqs = noise.freq(noise.keys[0]).to_value(u.Hz)

        # Find the unique values of focalplane keys

        # Group detectors: one group per focalplane_key value, or a single
        # group (key None) of all/`detset` detectors when the key is unset.
        dets_by_key = {}
        if self.focalplane_key is None:
            dets_by_key[None] = []
            for det in obs.all_detectors:
                if len(self.detset) != 0 and det not in self.detset:
                    continue
                dets_by_key[None].append(det)
        else:
            if self.focalplane_key not in focalplane.detector_data.colnames:
                msg = f"Focalplane does not have column for '{self.focalplane_key}'.  "
                msg += f"Available columns are {focalplane.detector_data.colnames}"
                raise RuntimeError(msg)
            for det in obs.all_detectors:
                key = focalplane[det][self.focalplane_key]
                if key not in dets_by_key:
                    dets_by_key[key] = []
                dets_by_key[key].append(det)

        # Create a new base-class noise object with the same PSDs and
        # mixing matrix as the input.  Then modify those values.  If the
        # output name is the same as the input, then delete the input
        # and replace it with the new model.

        # NOTE(review): nse_keys and mixing_matrix are mutated in place
        # below; if these properties return live references, the input
        # model is modified even when writing to a separate out_model key.
        nse_keys = noise.keys
        nse_dets = noise.detectors
        nse_freqs = {x: noise.freq(x) for x in nse_keys}
        nse_psds = {x: noise.psd(x) for x in nse_keys}
        nse_indx = {x: noise.index(x) for x in nse_keys}
        mixing_matrix = noise.mixing_matrix

        # Add the common mode noise PSDs

        fmin = self.fmin.to_value(u.Hz)
        fknee = self.fknee.to_value(u.Hz)
        alpha = self.alpha
        net = self.NET

        if self.static_coupling:
            # Fixed RNG inputs -> couplings are reproducible across
            # observations and realizations.
            obs_id = 0
            realization = 0
        else:
            obs_id = obs.uid
            realization = self.realization

        for key, dets in dets_by_key.items():
            if key is None:
                noise_key = f"{self.name}_{self.component}"
            else:
                noise_key = f"{self.name}_{self.component}_{key}"
            mixing_matrix[noise_key] = {}
            noise_uid = name_UID(noise_key)
            nse_keys.append(noise_key)
            nse_freqs[noise_key] = freqs * u.Hz
            # 1/f model: NET^2 * (f^alpha + fknee^alpha) / (f^alpha + fmin^alpha)
            nse_psds[noise_key] = net**2 * (
                (freqs**alpha + fknee**alpha) / (freqs**alpha + fmin**alpha)
            )
            nse_indx[noise_key] = noise_uid

            # Draw coupling strengths and record them in the mixing matrix
            for det in dets:
                # Counter-based RNG keyed so each (stream, telescope,
                # observation, realization, detector) draw is independent.
                key1 = noise_uid + obs.telescope.uid * 3956215
                key2 = obs_id
                counter1 = realization
                counter2 = focalplane[det]["uid"]
                gaussian = rng.random(
                    1,
                    sampler="gaussian",
                    key=(key1, key2),
                    counter=(counter1, counter2),
                )[0]
                coupling = (
                    self.coupling_strength_center
                    + gaussian * self.coupling_strength_width
                )
                mixing_matrix[det][noise_key] = coupling

        out_noise = Noise(
            detectors=nse_dets,
            freqs=nse_freqs,
            psds=nse_psds,
            indices=nse_indx,
            mixmatrix=mixing_matrix,
        )

        if self.out_model is None or self.noise_model == self.out_model:
            # We are replacing the input
            del obs[self.noise_model]
            obs[self.noise_model] = out_noise
        else:
            # We are storing this in a new key
            obs[self.out_model] = out_noise
    return

_finalize(data, **kwargs)

Source code in toast/ops/common_mode_noise.py
229
230
231
def _finalize(self, data, **kwargs):
    return
    return

_provides()

Source code in toast/ops/common_mode_noise.py
237
238
239
240
241
242
243
244
245
246
def _provides(self):
    prov = {
        "meta": list(),
        "shared": list(),
        "detdata": list(),
        "intervals": list(),
    }
    if self.out_model is None:
        prov["meta"].append(self.out_model)
    return prov

_requires()

Source code in toast/ops/common_mode_noise.py
233
234
235
def _requires(self):
    req = {"meta": self.noise_model}
    return req

toast.ops.TimeConstant

Bases: Operator

Simple time constant filtering without flag checks.

Source code in toast/ops/time_constant.py
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
@trait_docs
class TimeConstant(Operator):
    """Simple time constant filtering without flag checks.

    The detector data is convolved (or deconvolved, if `deconvolve` is
    True) with a single-pole time-constant kernel.  Samples within five
    time constants of either end of the observation are flagged, since the
    filter response there is corrupted by edge effects.
    """

    API = Int(0, help="Internal interface version for this operator")

    det_data = Unicode(
        defaults.det_data,
        help="Observation detdata key apply filtering to",
    )

    det_flags = Unicode(
        defaults.det_flags,
        allow_none=True,
        help="Observation detdata key for flags to use",
    )

    tau = Quantity(
        None,
        allow_none=True,
        help="Time constant to apply to all detectors.  Overrides `tau_name`",
    )

    tau_sigma = Float(
        None,
        allow_none=True,
        help="Randomized fractional error to add to each time constant.",
    )

    tau_name = Unicode(
        None,
        allow_none=True,
        help="Key to use to find time constants in the Focalplane.",
    )

    tau_flag_mask = Int(
        defaults.det_mask_invalid,
        help="Detector flag mask for cutting detectors with invalid Tau values.",
    )

    edge_flag_mask = Int(
        defaults.det_mask_invalid,
        help="Sample flag mask for cutting samples at the ends due to filter effects.",
    )

    batch = Bool(False, help="If True, batch all detectors and process at once")

    deconvolve = Bool(False, help="Deconvolve the time constant instead.")

    realization = Int(0, help="Realization ID, only used if tau_sigma is nonzero")

    debug = Unicode(
        None,
        allow_none=True,
        help="Path to directory for generating debug plots",
    )

    def __init__(self, **kwargs):
        # Trait values are parsed and applied by the Operator base class.
        super().__init__(**kwargs)

    def _get_tau(self, obs, det):
        """Return the (optionally randomized) time constant for one detector."""
        focalplane = obs.telescope.focalplane
        if self.tau is None:
            tau = focalplane[det][self.tau_name]
            try:
                tau = tau.to(u.second)
            except AttributeError:
                # The value is just a float in seconds (or NaN)
                tau = tau * u.second
        else:
            tau = self.tau
        if self.tau_sigma:
            # randomize tau in a reproducible manner: key on the detector
            # and counter on the session / realization.
            counter1 = obs.session.uid
            counter2 = self.realization
            key1 = focalplane[det]["uid"]
            key2 = 123456

            x = rng.random(
                1,
                sampler="gaussian",
                key=(key1, key2),
                counter=(counter1, counter2),
            )[0]
            tau = tau * (1 + x * self.tau_sigma)
        return tau

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Convolve / deconvolve the time constants and flag the edges."""
        # (unused `env`/`log` locals removed)
        if self.tau is None and self.tau_name is None:
            raise RuntimeError("Either tau or tau_name must be set.")

        for obs in data.obs:
            dets = obs.select_local_detectors(detectors, flagmask=self.tau_flag_mask)
            if len(dets) == 0:
                # No usable detectors in this observation
                continue

            fsample = obs.telescope.focalplane.sample_rate.to_value(u.Hz)

            # Get the timeconstants for all detectors
            tau_det = dict()
            for idet, det in enumerate(dets):
                tau_det[idet] = self._get_tau(obs, det)

            def _filter_kernel(indx, kfreqs):
                """Function to generate the filter kernel on demand.

                Our complex filter kernel is:
                    1 + j * (2 * pi * tau * freqs)

                """
                tau = tau_det[indx].to_value(u.second)
                kernel = np.zeros(len(kfreqs), dtype=np.complex128)
                kernel.real[:] = 1
                kernel.imag[:] = 2.0 * np.pi * tau * kfreqs
                return kernel

            # The slice of detector data we will use
            signal = obs.detdata[self.det_data][dets, :]

            if self.batch:
                # Use the internal batched (threaded) implementation.  This
                # is likely faster, but at the cost of memory use equal to
                # at least 8 times the detector timestream memory for
                # a given observation.
                algo = "internal"
            else:
                # Use numpy, one detector at a time.
                algo = "numpy"

            if self.debug is not None:
                debug_root = os.path.join(self.debug, f"{self.name}_{algo}")
            else:
                debug_root = None

            convolve(
                signal,
                fsample,
                kernel_func=_filter_kernel,
                deconvolve=self.deconvolve,
                algorithm=algo,
                debug=debug_root,
            )

            # Flag 5 time-constants of data at the beginning and end.
            # Bug fix: det_flags may be None (allow_none trait), in which
            # case edge flagging is skipped instead of raising.
            if self.det_flags is not None:
                for idet, det in enumerate(dets):
                    tau = tau_det[idet].to_value(u.second)
                    n_edge = int(5 * tau * fsample)
                    if n_edge == 0:
                        continue
                    obs.detdata[self.det_flags][det][:n_edge] |= self.edge_flag_mask
                    obs.detdata[self.det_flags][det][-n_edge:] |= self.edge_flag_mask

    def _finalize(self, data, **kwargs):
        return

    def _requires(self):
        req = {
            "shared": list(),
            "detdata": [self.det_data],
        }
        if self.det_flags is not None:
            # The edge flags are read-modified-written during _exec
            req["detdata"].append(self.det_flags)
        return req

    def _provides(self):
        return dict()

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

batch = Bool(False, help='If True, batch all detectors and process at once') class-attribute instance-attribute

debug = Unicode(None, allow_none=True, help='Path to directory for generating debug plots') class-attribute instance-attribute

deconvolve = Bool(False, help='Deconvolve the time constant instead.') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, help='Observation detdata key apply filtering to') class-attribute instance-attribute

det_flags = Unicode(defaults.det_flags, allow_none=True, help='Observation detdata key for flags to use') class-attribute instance-attribute

edge_flag_mask = Int(defaults.det_mask_invalid, help='Sample flag mask for cutting samples at the ends due to filter effects.') class-attribute instance-attribute

realization = Int(0, help='Realization ID, only used if tau_sigma is nonzero') class-attribute instance-attribute

tau = Quantity(None, allow_none=True, help='Time constant to apply to all detectors. Overrides `tau_name`') class-attribute instance-attribute

tau_flag_mask = Int(defaults.det_mask_invalid, help='Detector flag mask for cutting detectors with invalid Tau values.') class-attribute instance-attribute

tau_name = Unicode(None, allow_none=True, help='Key to use to find time constants in the Focalplane.') class-attribute instance-attribute

tau_sigma = Float(None, allow_none=True, help='Randomized fractional error to add to each time constant.') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/time_constant.py
79
80
def __init__(self, **kwargs):
    # Trait values passed as keyword arguments are parsed and applied by
    # the Operator base class constructor.
    super().__init__(**kwargs)

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/time_constant.py
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Convolve / deconvolve the detector time constants and flag the edges.

    Args:
        data: The distributed data container holding the observations.
        detectors: Optional list of detectors to process.

    Raises:
        RuntimeError: if neither `tau` nor `tau_name` is set.
    """
    # (unused `env`/`log` locals removed)
    if self.tau is None and self.tau_name is None:
        raise RuntimeError("Either tau or tau_name must be set.")

    for obs in data.obs:
        dets = obs.select_local_detectors(detectors, flagmask=self.tau_flag_mask)
        if len(dets) == 0:
            # No usable detectors in this observation
            continue

        fsample = obs.telescope.focalplane.sample_rate.to_value(u.Hz)

        # Get the timeconstants for all detectors
        tau_det = dict()
        for idet, det in enumerate(dets):
            tau_det[idet] = self._get_tau(obs, det)

        def _filter_kernel(indx, kfreqs):
            """Function to generate the filter kernel on demand.

            Our complex filter kernel is:
                1 + j * (2 * pi * tau * freqs)

            """
            tau = tau_det[indx].to_value(u.second)
            kernel = np.zeros(len(kfreqs), dtype=np.complex128)
            kernel.real[:] = 1
            kernel.imag[:] = 2.0 * np.pi * tau * kfreqs
            return kernel

        # The slice of detector data we will use
        signal = obs.detdata[self.det_data][dets, :]

        if self.batch:
            # Use the internal batched (threaded) implementation.  This
            # is likely faster, but at the cost of memory use equal to
            # at least 8 times the detector timestream memory for
            # a given observation.
            algo = "internal"
        else:
            # Use numpy, one detector at a time.
            algo = "numpy"

        if self.debug is not None:
            debug_root = os.path.join(self.debug, f"{self.name}_{algo}")
        else:
            debug_root = None

        convolve(
            signal,
            fsample,
            kernel_func=_filter_kernel,
            deconvolve=self.deconvolve,
            algorithm=algo,
            debug=debug_root,
        )

        # Flag 5 time-constants of data at the beginning and end.
        # Bug fix: det_flags may be None (allow_none trait), in which case
        # edge flagging is skipped instead of raising.
        if self.det_flags is not None:
            for idet, det in enumerate(dets):
                tau = tau_det[idet].to_value(u.second)
                n_edge = int(5 * tau * fsample)
                if n_edge == 0:
                    continue
                obs.detdata[self.det_flags][det][:n_edge] |= self.edge_flag_mask
                obs.detdata[self.det_flags][det][-n_edge:] |= self.edge_flag_mask

_finalize(data, **kwargs)

Source code in toast/ops/time_constant.py
178
179
def _finalize(self, data, **kwargs):
    return

_get_tau(obs, det)

Source code in toast/ops/time_constant.py
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
def _get_tau(self, obs, det):
    focalplane = obs.telescope.focalplane
    if self.tau is None:
        tau = focalplane[det][self.tau_name]
        try:
            tau = tau.to(u.second)
        except AttributeError:
            # The value is just a float in seconds (or NaN)
            tau = tau * u.second
    else:
        tau = self.tau
    if self.tau_sigma:
        # randomize tau in a reproducible manner
        counter1 = obs.session.uid
        counter2 = self.realization
        key1 = focalplane[det]["uid"]
        key2 = 123456

        x = rng.random(
            1,
            sampler="gaussian",
            key=(key1, key2),
            counter=(counter1, counter2),
        )[0]
        tau = tau * (1 + x * self.tau_sigma)
    return tau

_provides()

Source code in toast/ops/time_constant.py
188
189
def _provides(self):
    return dict()

_requires()

Source code in toast/ops/time_constant.py
181
182
183
184
185
186
def _requires(self):
    req = {
        "shared": list(),
        "detdata": [self.det_data],
    }
    return req

toast.ops.InjectCosmicRays

Bases: Operator

Inject the cosmic rays signal into the TOD. So far we inject two kinds of cosmic ray noise:

Wafer noise, due to ~400 impacts per second in the wafer that are individually indistinguishable. For each observation and for each detector we inject the low-noise component as a white noise signal, i.e. normally distributed random samples following the observed properties from simulations, read from disk. This component is then coadded to the sky signal (as if it were a noise term).

Common mode noise

A common mode noise within each detector pair can be simulated given the properties of the wafer noise. The correlation information found in the file provided as input to the simulations will be used, if present; otherwise a 50% detector correlation is assumed.

Direct hits (or Glitches)

Given the size of the detector we can derive the cosmic ray event rate and simulate the profile of a cosmic ray glitch. We assume the glitch to be described as

.. math:: \gamma(t) = C_1 + C_2 e^{-t/\tau}

where :math:`C_1` and :math:`C_2` and the time constant :math:`\tau` are drawn from a distribution of values estimated from simulations. For each observation and each detector, we estimate the number of hits expected theoretically and draw a random integer, N, from a Poisson distribution given the expected number of events, Nexp. We then randomly select N timestamps where the hits will be injected into the TOD simulated in TOAST. We evaluate the function :math:`\gamma` at a higher sampling rate (~150 Hz), decimate it to the TOD sample rate, and coadd it.

Parameters:

Name Type Description Default
crfile string

A *.npz file encoding 4 attributes, low_noise (mean and std. dev. of the wafer noise) sampling_rate sampling rate of the glitch simulations direct_hits distribution of the glitch parameters correlation_matrix correlation matrix for common mode must have a tag {detector} that will be replaced with the detector index.

required
signal_name string

the cache reference of the TOD data where the cosmic ray will be stored

required
realization int

to run several Monte-Carlo realizations of cosmic ray noise

required
eventrate float)

the expected event rate of hits in a detector

required
inject_direct_hits bool

will include also direct hits if set to True

required
conversion_factor float

factor to convert the cosmic ray units to temperature units

required
common_mode bool)

will include also common mode per pixel pair if set to True

required
Source code in toast/ops/sim_cosmic_rays.py
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
class InjectCosmicRays(Operator):
    r"""Inject the cosmic ray signal into the TOD.

    So far we inject two kinds of cosmic ray noise:

    Wafer noise: due to ~400 impacts per second in the wafer, indistinguishable
    individually.  For each observation and for each detector we inject a low
    noise component as a *white noise signal*, i.e. normally distributed random
    samples following the properties observed in simulations and read from
    disc.  This component is then coadded to the sky signal (as if it were a
    noise term).

    Common mode noise: a common mode noise within each detector pair can be
    simulated given the properties of the wafer noise.  The correlation
    information is read from the input file, if present, otherwise a 50%
    detector correlation is assumed.

    Direct hits (or glitches): given the size of the detector we can derive
    the cosmic ray event rate and simulate the profile of a cosmic ray glitch.
    We assume the glitch to be described as

    .. math::
        \gamma(t) = C_1 + C_2 e^{-t/\tau}

    where :math:`C_1`, :math:`C_2` and the time constant :math:`\tau` are
    drawn from a distribution of values estimated from simulations.  For each
    observation and each detector, we estimate the number of hits expected
    theoretically and draw a random integer ``N`` from a Poisson distribution
    with that expectation.  We then randomly select ``N`` timestamps where the
    hits will be injected into the TOD simulated in TOAST.  The function
    :math:`\gamma` is evaluated at a higher sampling rate (~150 Hz), decimated
    to the TOD sample rate and coadded.

    Args:
        crfile (str): a ``*.npz`` file encoding 4 attributes:
            ``low_noise`` (mean and std. dev. of the wafer noise),
            ``sampling_rate`` (sampling rate of the glitch simulations),
            ``direct_hits`` (distribution of the glitch parameters),
            ``correlation_matrix`` (correlation matrix for the common mode).
            The path must contain the tag ``detector``, replaced at run time
            with the detector index.
        det_data (str): the detdata key of the TOD where the cosmic ray
            signal will be injected.
        realization (int): to run several Monte-Carlo realizations of cosmic
            ray noise.
        eventrate (float): the expected event rate of hits in a detector.
        inject_direct_hits (bool): also include direct hits if set to True.
        conversion_factor (Quantity): factor to convert the cosmic ray units
            to temperature units.
        include_common_mode (bool): also include a common mode per pixel
            pair if set to True.
    """

    API = Int(0, help="Internal interface version for this operator")

    det_data = Unicode(
        defaults.det_data, help="Observation detdata key to inject the gain drift"
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    crfile = Unicode(None, help="Path to the *.npz file encoding cosmic ray infos")

    crdata_units = Unit(u.W, help="The units of the input amplitudes")

    realization = Int(0, help="integer to set a different random seed ")

    eventrate = Float(
        0.0015,
        help="the expected event rate of hits in a detector",
    )

    inject_direct_hits = Bool(False, help="inject  direct hits as glitches in the TODs")

    conversion_factor = Quantity(
        1 * u.K / u.W,
        help="factor to convert the cosmic ray signal (usually Watts) into temperature units",
    )

    include_common_mode = Bool(
        False, help="will include also common mode per pixel pair  if set to True"
    )

    def __init__(self, **kwargs):
        # Forward all traitlet configuration to the Operator base class.
        super().__init__(**kwargs)

    def load_cosmic_ray_data(self, filename):
        """Load the cosmic ray statistics stored in a ``*.npz`` file.

        Returns a lazy ``NpzFile`` mapping with keys such as ``low_noise``,
        ``sampling_rate``, ``direct_hits`` and (optionally)
        ``correlation_matrix``.
        """
        data_dic = np.load(filename)

        return data_dic

    def resample_cosmic_ray_statistics(self, arr, Nresamples, key, counter):
        """Resample the glitch parameter distribution via inverse-CDF sampling.

        Each column of ``arr`` is histogrammed over its central ~95% interval,
        the empirical CDF is inverted by interpolation, and ``Nresamples``
        values are drawn per column.

        Args:
            arr (ndarray): (nsim, nparams) array of simulated parameters.
            Nresamples (int): number of new samples to draw per parameter.
            key (tuple): RNG key pair for the toast streamed RNG.
            counter (tuple): RNG counter pair for the toast streamed RNG.

        Returns:
            (ndarray): (Nresamples, nparams) resampled parameters.
        """
        resampled = np.zeros((Nresamples, arr.shape[1]))

        for ii in range(arr.shape[1]):
            # Resample by considering the bulk of  the Parameter distribution ~2sigma central interval
            bins = np.linspace(
                np.quantile(
                    arr[:, ii],
                    0.025,
                ),
                np.quantile(arr[:, ii], 0.975),
                30,
            )

            binned, edges = np.histogram(arr[:, ii], bins=bins)

            xb = 0.5 * (edges[:-1] + edges[1:])
            CDF = np.cumsum(binned) / binned.sum()

            pinv = interpolate.interp1d(CDF, xb, fill_value="extrapolate")
            # NOTE(review): the same key/counter is reused for every column,
            # so all parameter columns are driven by identical uniform draws
            # (comonotonic parameters) -- confirm this is intended.
            r = rng.random(
                Nresamples,
                sampler="uniform_01",
                key=key,
                counter=counter,
            )

            resampled[:, ii] = pinv(r)

        return resampled

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Simulate cosmic ray signal and coadd it into the detector data."""
        env = Environment.get()
        log = Logger.get()
        if self.crfile is None:
            raise AttributeError(
                "OpInjectCosmicRays cannot run if you don't provide cosmic ray data."
            )
        for ob in data.obs:
            # Get the detectors we are using for this observation
            dets = ob.select_local_detectors(detectors)
            if len(dets) == 0:
                # Nothing to do for this observation
                continue
            comm = ob.comm.comm_group
            rank = ob.comm.group_rank
            # Make sure detector data output exists
            exists = ob.detdata.ensure(
                self.det_data, detectors=dets, create_units=self.det_data_units
            )
            sindx = ob.session.uid
            telescope = ob.telescope.uid
            focalplane = ob.telescope.focalplane
            size = ob.detdata[self.det_data][dets[0]].size
            samplerate = focalplane.sample_rate.to_value(u.Hz)

            obstime_seconds = size / samplerate
            n_events_expected = self.eventrate * obstime_seconds
            # RNG stream key: unique per realization and telescope.
            key1 = self.realization * 4294967296 + telescope * 65536
            counter2 = 0

            for kk, det in enumerate(dets):
                detindx = focalplane[det]["uid"]
                key2 = sindx
                counter1 = detindx

                rngdata = rng.random(
                    size,
                    sampler="gaussian",
                    key=(key1, key2),
                    counter=(counter1, counter2),
                )
                counter2 += size
                # NOTE(review): the file tag is filled with the local
                # enumeration index kk, not the detector uid -- confirm the
                # on-disk file naming convention.
                filename = self.crfile.replace("detector", f"det{kk}")
                data_dic = self.load_cosmic_ray_data(filename)
                lownoise_params = data_dic["low_noise"]
                if not self.include_common_mode:
                    lownoise_hits = lownoise_params[1] * rngdata + lownoise_params[0]
                    tmparray = lownoise_hits
                else:
                    # Detectors are paired: even kk pairs with kk+1, odd with kk-1.
                    if kk % 2 != 0:  # if kk is odd
                        detid_common = kk - 1
                        kkcol = kk - 1
                    else:  # kk even
                        detid_common = kk
                        kkcol = kk + 1

                    filename_common = self.crfile.replace(
                        "detector", f"det{detid_common}"
                    )
                    data_common = self.load_cosmic_ray_data(filename_common)
                    try:
                        corr_matr = data_common["correlation_matrix"]
                        corr_frac = corr_matr[kk, kkcol]

                    except KeyError:
                        log.warning(
                            "Correlation matrix not provided for common mode, assuming 50% correlation "
                        )
                        corr_frac = 0.5

                    # Variance of the correlated (common) component of the pair.
                    var_corr = corr_frac * data_common["low_noise"][1] ** 2

                    rngdata_common = rng.random(
                        size,
                        sampler="gaussian",
                        key=(key1, key2),
                        counter=(detid_common, counter2),
                    )
                    counter2 += size
                    cr_common_mode = (
                        np.sqrt(var_corr) * rngdata_common + data_common["low_noise"][0]
                    )

                    lownoise_hits = lownoise_params[1] * rngdata + lownoise_params[0]
                    tmparray = lownoise_hits + cr_common_mode

                if self.inject_direct_hits:
                    glitches_param_distr = data_dic["direct_hits"]
                    fsampl_sims = (data_dic["sampling_rate"][0] * u.Hz).value

                    glitch_seconds = 0.15  # seconds, i.e. ~ 3samples at 19Hz
                    # we approximate the number of samples to the closest integer
                    nsamples_high = int(np.around(glitch_seconds * fsampl_sims))
                    nsamples_low = int(np.around(glitch_seconds * samplerate))
                    # NOTE(review): the event count uses numpy's global RNG, so
                    # it is not controlled by the toast RNG keys -- confirm
                    # reproducibility requirements.
                    n_events = np.random.poisson(n_events_expected)
                    params = self.resample_cosmic_ray_statistics(
                        glitches_param_distr,
                        Nresamples=n_events,
                        key=(key1, key2),
                        counter=(counter1, counter2),
                    )
                    counter2 += n_events
                    # draw n_events uniformly from a continuous distribution:
                    # the events must happen during this observation, and the
                    # glitch is injected at most `glitch_seconds` before the
                    # end of the observation, otherwise downsampling breaks.
                    rngunif = rng.random(
                        n_events,
                        sampler="uniform_01",
                        key=(key1, key2),
                        counter=(counter1, counter2),
                    )
                    counter2 += n_events

                    time_glitches = (obstime_seconds - glitch_seconds) * rngunif
                    # Guard the zero-event case: max() of an empty array raises.
                    assert n_events == 0 or time_glitches.max() < obstime_seconds

                    # estimate the timestamps rounding off the events in seconds
                    time_stamp_glitches = np.around(time_glitches * samplerate).astype(
                        np.int64
                    )
                    # we measure the glitch and the bestfit timeconstant in millisec
                    tglitch = np.linspace(0, glitch_seconds * 1e3, nsamples_high)

                    def glitch_func(t, C1, C2, tau):
                        # Exponential glitch template, t in milliseconds.
                        return C1 + (C2 * np.exp(-t / tau))

                    for i in range(n_events):
                        tmphit = glitch_func(tglitch, *params[i])
                        # Resample the glitch template to the TOD rate and
                        # overwrite the corresponding samples.
                        tmparray[
                            time_stamp_glitches[i] : time_stamp_glitches[i]
                            + nsamples_low
                        ] = signal.resample(tmphit, num=nsamples_low, t=tglitch)[0]
                # Attach input units and convert into the detector data units.
                tmparray = tmparray * self.crdata_units
                ob.detdata[self.det_data][det] += (
                    self.conversion_factor * tmparray
                ).to_value(self.det_data_units)

        return

    def _finalize(self, data, **kwargs):
        # No finalization needed for this operator.
        return

    def _requires(self):
        # NOTE(review): self.boresight and self.view are not defined as traits
        # on this class -- confirm they are inherited from the base class,
        # otherwise calling this raises an attribute/trait error.
        req = {
            "meta": list(),
            "shared": [
                self.boresight,
            ],
            "detdata": [self.det_data],
            "intervals": list(),
        }
        if self.view is not None:
            req["intervals"].append(self.view)
        return req

    def _provides(self):
        prov = {
            "meta": list(),
            "shared": list(),
            "detdata": [
                self.det_data,
            ],
        }
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

conversion_factor = Quantity(1 * u.K / u.W, help='factor to convert the cosmic ray signal (usually Watts) into temperature units') class-attribute instance-attribute

crdata_units = Unit(u.W, help='The units of the input amplitudes') class-attribute instance-attribute

crfile = Unicode(None, help='Path to the *.npz file encoding cosmic ray infos') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, help='Observation detdata key to inject the gain drift') class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

eventrate = Float(0.0015, help='the expected event rate of hits in a detector') class-attribute instance-attribute

include_common_mode = Bool(False, help='will include also common mode per pixel pair if set to True') class-attribute instance-attribute

inject_direct_hits = Bool(False, help='inject direct hits as glitches in the TODs') class-attribute instance-attribute

realization = Int(0, help='integer to set a different random seed ') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/sim_cosmic_rays.py
94
95
def __init__(self, **kwargs):
    # Forward all traitlet configuration to the Operator base class.
    super().__init__(**kwargs)

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/sim_cosmic_rays.py
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Simulate cosmic ray signal and coadd it into the detector data.

    Iterates over all observations and local detectors, draws the wafer
    noise (and optionally common mode and direct-hit glitches) from the
    statistics stored in ``self.crfile``, and adds the converted signal
    to ``ob.detdata[self.det_data]``.
    """
    env = Environment.get()
    log = Logger.get()
    if self.crfile is None:
        raise AttributeError(
            "OpInjectCosmicRays cannot run if you don't provide cosmic ray data."
        )
    for ob in data.obs:
        # Get the detectors we are using for this observation
        dets = ob.select_local_detectors(detectors)
        if len(dets) == 0:
            # Nothing to do for this observation
            continue
        comm = ob.comm.comm_group
        rank = ob.comm.group_rank
        # Make sure detector data output exists
        exists = ob.detdata.ensure(
            self.det_data, detectors=dets, create_units=self.det_data_units
        )
        sindx = ob.session.uid
        telescope = ob.telescope.uid
        focalplane = ob.telescope.focalplane
        size = ob.detdata[self.det_data][dets[0]].size
        samplerate = focalplane.sample_rate.to_value(u.Hz)

        obstime_seconds = size / samplerate
        n_events_expected = self.eventrate * obstime_seconds
        # RNG stream key: unique per realization and telescope.
        key1 = self.realization * 4294967296 + telescope * 65536
        counter2 = 0

        for kk, det in enumerate(dets):
            detindx = focalplane[det]["uid"]
            key2 = sindx
            counter1 = detindx

            rngdata = rng.random(
                size,
                sampler="gaussian",
                key=(key1, key2),
                counter=(counter1, counter2),
            )
            counter2 += size
            # NOTE(review): the file tag is filled with the local enumeration
            # index kk, not the detector uid -- confirm the file naming.
            filename = self.crfile.replace("detector", f"det{kk}")
            data_dic = self.load_cosmic_ray_data(filename)
            lownoise_params = data_dic["low_noise"]
            if not self.include_common_mode:
                lownoise_hits = lownoise_params[1] * rngdata + lownoise_params[0]
                tmparray = lownoise_hits
            else:
                # Detectors are paired: even kk pairs with kk+1, odd with kk-1.
                if kk % 2 != 0:  # if kk is odd
                    detid_common = kk - 1
                    kkcol = kk - 1
                else:  # kk even
                    detid_common = kk
                    kkcol = kk + 1

                filename_common = self.crfile.replace(
                    "detector", f"det{detid_common}"
                )
                data_common = self.load_cosmic_ray_data(filename_common)
                try:
                    corr_matr = data_common["correlation_matrix"]
                    corr_frac = corr_matr[kk, kkcol]

                except KeyError:
                    log.warning(
                        "Correlation matrix not provided for common mode, assuming 50% correlation "
                    )
                    corr_frac = 0.5

                # Variance of the correlated (common) component of the pair.
                var_corr = corr_frac * data_common["low_noise"][1] ** 2

                rngdata_common = rng.random(
                    size,
                    sampler="gaussian",
                    key=(key1, key2),
                    counter=(detid_common, counter2),
                )
                counter2 += size
                cr_common_mode = (
                    np.sqrt(var_corr) * rngdata_common + data_common["low_noise"][0]
                )

                lownoise_hits = lownoise_params[1] * rngdata + lownoise_params[0]
                tmparray = lownoise_hits + cr_common_mode

            if self.inject_direct_hits:
                glitches_param_distr = data_dic["direct_hits"]
                fsampl_sims = (data_dic["sampling_rate"][0] * u.Hz).value

                glitch_seconds = 0.15  # seconds, i.e. ~ 3samples at 19Hz
                # we approximate the number of samples to the closest integer
                nsamples_high = int(np.around(glitch_seconds * fsampl_sims))
                nsamples_low = int(np.around(glitch_seconds * samplerate))
                # NOTE(review): the event count uses numpy's global RNG, so it
                # is not controlled by the toast RNG keys -- confirm
                # reproducibility requirements.
                n_events = np.random.poisson(n_events_expected)
                params = self.resample_cosmic_ray_statistics(
                    glitches_param_distr,
                    Nresamples=n_events,
                    key=(key1, key2),
                    counter=(counter1, counter2),
                )
                counter2 += n_events
                # draw n_events uniformly from a continuous distribution:
                # events must happen during this observation, and the glitch
                # is injected at most `glitch_seconds` before the end of the
                # observation, otherwise downsampling breaks.
                rngunif = rng.random(
                    n_events,
                    sampler="uniform_01",
                    key=(key1, key2),
                    counter=(counter1, counter2),
                )
                counter2 += n_events

                time_glitches = (obstime_seconds - glitch_seconds) * rngunif
                # Guard the zero-event case: max() of an empty array raises.
                assert n_events == 0 or time_glitches.max() < obstime_seconds

                # estimate the timestamps rounding off the events in seconds
                time_stamp_glitches = np.around(time_glitches * samplerate).astype(
                    np.int64
                )
                # we measure the glitch and the bestfit timeconstant in millisec
                tglitch = np.linspace(0, glitch_seconds * 1e3, nsamples_high)

                def glitch_func(t, C1, C2, tau):
                    # Exponential glitch template, t in milliseconds.
                    return C1 + (C2 * np.exp(-t / tau))

                for i in range(n_events):
                    tmphit = glitch_func(tglitch, *params[i])
                    # Resample the glitch template to the TOD rate and
                    # overwrite the corresponding samples.
                    tmparray[
                        time_stamp_glitches[i] : time_stamp_glitches[i]
                        + nsamples_low
                    ] = signal.resample(tmphit, num=nsamples_low, t=tglitch)[0]
            # Attach input units and convert into the detector data units.
            tmparray = tmparray * self.crdata_units
            ob.detdata[self.det_data][det] += (
                self.conversion_factor * tmparray
            ).to_value(self.det_data_units)

    return

_finalize(data, **kwargs)

Source code in toast/ops/sim_cosmic_rays.py
279
280
def _finalize(self, data, **kwargs):
    """No finalization is needed for this operator."""
    return None

_provides()

Source code in toast/ops/sim_cosmic_rays.py
295
296
297
298
299
300
301
302
303
def _provides(self):
    """Report the data products produced by this operator.

    Only the detector data keyed by ``self.det_data`` is created/modified.
    """
    return {
        "meta": [],
        "shared": [],
        "detdata": [self.det_data],
    }

_requires()

Source code in toast/ops/sim_cosmic_rays.py
282
283
284
285
286
287
288
289
290
291
292
293
def _requires(self):
    """Report the data products required by this operator."""
    intervals = [] if self.view is None else [self.view]
    return {
        "meta": [],
        "shared": [self.boresight],
        "detdata": [self.det_data],
        "intervals": intervals,
    }

load_cosmic_ray_data(filename)

Source code in toast/ops/sim_cosmic_rays.py
 97
 98
 99
100
def load_cosmic_ray_data(self, filename):
    """Load the cosmic ray statistics stored in a ``*.npz`` file.

    Returns the (lazy) mapping produced by ``np.load``, with keys such as
    ``low_noise``, ``sampling_rate``, ``direct_hits`` and optionally
    ``correlation_matrix``.
    """
    return np.load(filename)

resample_cosmic_ray_statistics(arr, Nresamples, key, counter)

Source code in toast/ops/sim_cosmic_rays.py
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
def resample_cosmic_ray_statistics(self, arr, Nresamples, key, counter):
    """Resample the glitch parameter distribution via inverse-CDF sampling.

    Each column of ``arr`` is histogrammed over its central ~95% interval
    (the ~2-sigma bulk of the distribution), the empirical CDF is inverted
    by interpolation, and ``Nresamples`` values are drawn per column with
    the toast streamed RNG.
    """
    ncols = arr.shape[1]
    resampled = np.zeros((Nresamples, ncols))

    for col in range(ncols):
        values = arr[:, col]
        # Restrict the histogram to the central ~95% of the distribution.
        lo = np.quantile(values, 0.025)
        hi = np.quantile(values, 0.975)
        counts, edges = np.histogram(values, bins=np.linspace(lo, hi, 30))

        centers = 0.5 * (edges[1:] + edges[:-1])
        cdf = np.cumsum(counts) / counts.sum()

        # Inverse CDF: map uniform draws back to parameter values.
        inverse_cdf = interpolate.interp1d(cdf, centers, fill_value="extrapolate")
        uniform = rng.random(
            Nresamples,
            sampler="uniform_01",
            key=key,
            counter=counter,
        )

        resampled[:, col] = inverse_cdf(uniform)

    return resampled

toast.ops.GainDrifter

Bases: Operator

Operator which injects gain drifts to the signal.

The drift can be injected in 3 different ways: - linear_drift: inject a linear drift with a random slope for each detector - slow_drift: inject a drift signal with a 1/f PSD, simulated up to frequencies < cutoff_freq; in case cutoff_freq < (1/t_obs), cutoff_freq = 1/t_obs is used. - thermal_drift: inject a drift encoding frequencies up to the sampling rate, to simulate the thermal fluctuations in the focalplane. Both slow_drift and thermal_drift modes allow injecting a common mode drift into all the detectors belonging to a group identified by the string focalplane_group (which can be any string set by the user to identify the groups in the detector table). The amount of common mode contribution is set by setting detector_mismatch to a value < 1 (with 0 being the case of injecting only the common mode signal).

Source code in toast/ops/sim_gaindrifts.py
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
@trait_docs
class GainDrifter(Operator):
    """Operator which injects gain drifts to the signal.

    The drift can be injected into 3 different ways:
    - `linear_drift`: inject a linear drift   with a random slope  for each detector
    - `slow_drift`: inject a drift signal with a `1/f` PSD, simulated up to
    the  frequencies<`cutoff_freq`, in case `cutoff_freq< (1/t_obs)`, `cutoff_freq=1/t_obs`.
    - `thermal_drift`: inject a drift encoding frequencies up to the sampling rate, to simulate
    the thermal fluctuations in the focalplane.
    Both `slow_drift` and `thermal_drift` modes encode the possibility to inject a common mode drifts
    to all the detectors belonging to a group of detectors identified the string `focalplane_group` ( can
    be any string set by the user used to identify the groups in the detector table).
    The amount of common mode contribution is set by setting detector_mismatch to a value `<1`, (with
    0 being the case with only injecting common mode signal).

    """

    API = Int(0, help="Internal interface version for this operator")

    det_data = Unicode(
        defaults.det_data, help="Observation detdata key to inject the gain drift"
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    include_common_mode = Bool(
        False, help="If True, inject a common drift to all the local detector group "
    )

    fknee_drift = Quantity(
        20.0 * u.mHz,
        help="fknee of the drift signal",
    )
    cutoff_freq = Quantity(
        0.2 * u.mHz,
        help="cutoff  frequency to simulate a slow  drift (assumed < sampling rate)",
    )
    sigma_drift = Float(
        1e-3,
        help="dimensionless amplitude  of the drift signal, (for `thermal_drift` corresponds to the thermal fluctuation level in K units)",
    )
    alpha_drift = Float(
        1.0,
        help="spectral index  of the drift signal spectrum",
    )

    detector_mismatch = Float(
        1.0,
        help="mismatch between detectors for `thermal_drift` and `slow_drift` ranging from 0 to 1. Default value implies no common mode injected",
    )
    thermal_fluctuation_amplitude = Quantity(
        1 * u.K,
        help="Amplitude of thermal fluctuation for `thermal_drift` in  Kelvin units ",
    )
    focalplane_Tbath = Quantity(
        100 * u.mK,
        help="temperature of the focalplane for `thermal_drift` ",
    )
    responsivity_function = Callable(
        lambda dT: dT,
        help="Responsivity function takes as input  the thermal  fluctuations,`dT` defined as `dT=Tdrift/Tbath + 1 `. Default we assume the identity function ",
    )

    realization = Int(0, help="integer to set a different random seed ")
    component = Int(0, allow_none=False, help="Component index for this simulation")

    drift_mode = Unicode(
        "linear",
        help="a string from [linear_drift, thermal_drift, slow_drift] to set the way the drift is modelled",
    )

    focalplane_group = Unicode(
        "wafer",
        help='focalplane table column to use for grouping detectors: can be any string like "wafer", "pixel"',
    )

    def get_psd(self, f):
        return (
            self.sigma_drift**2
            * (self.fknee_drift.to_value(u.Hz) / f) ** self.alpha_drift
        )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """
        Generate gain timestreams.

        This iterates over all observations and detectors, simulates a gain drift across the observation time
        and  multiplies it   to the  signal TOD of the  detectors in each detector pair.


        Args:
            data (toast.Data): The distributed data.
        """
        env = Environment.get()
        log = Logger.get()

        for ob in data.obs:
            # Get the detectors we are using for this observation
            dets = ob.select_local_detectors(detectors)
            if len(dets) == 0:
                # Nothing to do for this observation
                continue
            comm = ob.comm.comm_group
            rank = ob.comm.group_rank
            # Make sure detector data output exists
            exists = ob.detdata.ensure(
                self.det_data, detectors=dets, create_units=self.det_data_units
            )
            sindx = ob.session.uid
            telescope = ob.telescope.uid
            focalplane = ob.telescope.focalplane
            size = ob.detdata[self.det_data][dets[0]].size
            fsampl = focalplane.sample_rate.to_value(u.Hz)

            if self.drift_mode == "linear_drift":
                key1 = (
                    self.realization * 4294967296 + telescope * 65536 + self.component
                )
                counter2 = 0

                for det in dets:
                    detindx = focalplane[det]["uid"]
                    key2 = sindx
                    counter1 = detindx

                    rngdata = rng.random(
                        1,
                        sampler="gaussian",
                        key=(key1, key2),
                        counter=(counter1, counter2),
                    )
                    gf = 1 + rngdata[0] * self.sigma_drift
                    gain = (gf - 1) * np.linspace(0, 1, size) + 1

                    ob.detdata[self.det_data][det] *= gain

            elif self.drift_mode == "thermal_drift":
                fmin = fsampl / (4 * size)
                # the factor of 4x the length of the sample vector  is
                # to avoid circular correlations
                freq = np.logspace(np.log10(fmin), np.log10(fsampl / 2.0), 1000)
                psd = self.get_psd(freq)
                det_group = np.unique(focalplane.detector_data[self.focalplane_group])
                thermal_fluct = np.zeros(
                    (len(det_group), ob.n_local_samples), dtype=np.float64
                )
                for iw, w in enumerate(det_group):
                    # simulate a noise-like timestream
                    thermal_fluct[iw][:] = sim_noise_timestream(
                        realization=self.realization,
                        telescope=ob.telescope.uid,
                        component=self.component,
                        sindx=sindx,
                        # we generate the same timestream for the
                        # detectors in the same group
                        detindx=iw,
                        rate=fsampl,
                        firstsamp=ob.local_index_offset,
                        samples=ob.n_local_samples,
                        freq=freq,
                        psd=psd,
                        py=False,
                    )

                for det in dets:
                    detindx = focalplane[det]["uid"]
                    # we inject a detector mismatch in the thermal thermal_fluctuation
                    # only if the mismatch !=0
                    if self.detector_mismatch != 0:
                        key1 = (
                            self.realization * 429496123345
                            + telescope * 6512345
                            + self.component
                        )
                        counter1 = detindx
                        counter2 = 0
                        key2 = sindx
                        rngdata = rng.random(
                            1,
                            sampler="gaussian",
                            key=(key1, key2),
                            counter=(counter1, counter2),
                        )
                        rngdata = 1 + rngdata[0] * self.detector_mismatch
                        thermal_factor = self.thermal_fluctuation_amplitude * rngdata
                    else:
                        thermal_factor = self.thermal_fluctuation_amplitude

                    # identify to which group the detector belongs
                    mask = focalplane[det][self.focalplane_group] == det_group

                    # assign the thermal fluct. simulated for that det. group
                    # making sure that  Tdrift is in the same units as Tbath
                    Tdrift = (thermal_factor * thermal_fluct[mask][0]).to(
                        self.focalplane_Tbath.unit
                    )
                    # we make dT an array of floats (from an array of dimensionless Quantity),
                    # this will avoid unit errors when multiplied to the det_data.

                    dT = (Tdrift / self.focalplane_Tbath + 1).to_value()

                    ob.detdata[self.det_data][det] *= self.responsivity_function(dT)

            elif self.drift_mode == "slow_drift":
                fmin = fsampl / (4 * size)
                # the factor of 4x the length of the sample vector  is
                # to avoid circular correlations
                freq = np.logspace(np.log10(fmin), np.log10(fsampl / 2.0), 1000)
                # making sure that the cut-off  frequency
                # is always above the  observation time scale .
                cutoff = np.max([self.cutoff_freq.to_value(u.Hz), fsampl / size])
                argmin = np.argmin(np.fabs(freq - cutoff))

                psd = np.concatenate(
                    [self.get_psd(freq[:argmin]), np.zeros_like(freq[argmin:])]
                )
                det_group = np.unique(focalplane.detector_data[self.focalplane_group])

                # if the mismatch is maximum (i.e. =1 ) we don't
                # inject the common mode but only an indepedendent slow drift

                if self.detector_mismatch == 1:
                    gain_common = np.zeros_like(det_group, dtype=np.float64)
                else:
                    gain_common = []
                    for iw, w in enumerate(det_group):
                        gain = sim_noise_timestream(
                            realization=self.realization,
                            telescope=ob.telescope.uid,
                            component=self.component,
                            sindx=sindx,
                            detindx=iw,  # drift common to all detectors
                            rate=fsampl,
                            firstsamp=ob.local_index_offset,
                            samples=ob.n_local_samples,
                            freq=freq,
                            psd=psd,
                            py=False,
                        )
                        gain_common.append(np.array(gain))
                        gain.clear()
                        del gain
                gain_common = np.array(gain_common)

                for det in dets:
                    detindx = focalplane[det]["uid"]
                    size = ob.detdata[self.det_data][det].size

                    # simulate a noise-like timestream

                    gain = sim_noise_timestream(
                        realization=self.realization,
                        telescope=ob.telescope.uid,
                        component=self.component,
                        sindx=sindx,
                        detindx=detindx,
                        rate=fsampl,
                        firstsamp=ob.local_index_offset,
                        samples=ob.n_local_samples,
                        freq=freq,
                        psd=psd,
                        py=False,
                    )
                    # identify to which group the detector belongs
                    mask = focalplane[det][self.focalplane_group] == det_group
                    # assign the thermal fluct. simulated for that det. group
                    ob.detdata[self.det_data][det] *= (
                        1
                        + (self.detector_mismatch * gain.array())
                        + (1 - self.detector_mismatch) * gain_common[mask][0]
                    )
                    gain.clear()
                    del gain

        return

    def _finalize(self, data, **kwargs):
        return

    def _requires(self):
        req = {
            "meta": list(),
            "shared": [
                self.boresight,
            ],
            "detdata": [self.det_data],
            "intervals": list(),
        }
        if self.view is not None:
            req["intervals"].append(self.view)
        return req

    def _provides(self):
        prov = {
            "meta": list(),
            "shared": list(),
            "detdata": [
                self.det_data,
            ],
        }
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

alpha_drift = Float(1.0, help='spectral index of the drift signal spectrum') class-attribute instance-attribute

component = Int(0, allow_none=False, help='Component index for this simulation') class-attribute instance-attribute

cutoff_freq = Quantity(0.2 * u.mHz, help='cutoff frequency to simulate a slow drift (assumed < sampling rate)') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, help='Observation detdata key to inject the gain drift') class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

detector_mismatch = Float(1.0, help='mismatch between detectors for `thermal_drift` and `slow_drift` ranging from 0 to 1. Default value implies no common mode injected') class-attribute instance-attribute

drift_mode = Unicode('linear', help='a string from [linear_drift, thermal_drift, slow_drift] to set the way the drift is modelled') class-attribute instance-attribute

fknee_drift = Quantity(20.0 * u.mHz, help='fknee of the drift signal') class-attribute instance-attribute

focalplane_Tbath = Quantity(100 * u.mK, help='temperature of the focalplane for `thermal_drift` ') class-attribute instance-attribute

focalplane_group = Unicode('wafer', help='focalplane table column to use for grouping detectors: can be any string like "wafer", "pixel"') class-attribute instance-attribute

include_common_mode = Bool(False, help='If True, inject a common drift to all the local detector group ') class-attribute instance-attribute

realization = Int(0, help='integer to set a different random seed ') class-attribute instance-attribute

responsivity_function = Callable(lambda dT: dT, help='Responsivity function takes as input the thermal fluctuations,`dT` defined as `dT=Tdrift/Tbath + 1 `. Default we assume the identity function ') class-attribute instance-attribute

sigma_drift = Float(0.001, help='dimensionless amplitude of the drift signal, (for `thermal_drift` corresponds to the thermal fluctuation level in K units)') class-attribute instance-attribute

thermal_fluctuation_amplitude = Quantity(1 * u.K, help='Amplitude of thermal fluctuation for `thermal_drift` in Kelvin units ') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/sim_gaindrifts.py
103
104
def __init__(self, **kwargs):
    """Forward all trait keyword settings to the parent constructor."""
    super().__init__(**kwargs)

_exec(data, detectors=None, **kwargs)

Generate gain timestreams.

This iterates over all observations and detectors, simulates a gain drift across the observation time and multiplies it to the signal TOD of the detectors in each detector pair.

Parameters:

Name Type Description Default
data Data

The distributed data.

required
Source code in toast/ops/sim_gaindrifts.py
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Generate gain-drift timestreams and apply them to the detector data.

    Iterates over all observations and the selected detectors, simulates a
    gain drift across the observation time, and multiplies it into the
    signal TOD of each detector.  The model is chosen by the ``drift_mode``
    trait: "linear_drift", "thermal_drift" or "slow_drift".

    Args:
        data (toast.Data): The distributed data.
    """
    # NOTE(review): env and exists (below) are assigned but never used here.
    env = Environment.get()
    log = Logger.get()

    for ob in data.obs:
        # Get the detectors we are using for this observation
        dets = ob.select_local_detectors(detectors)
        if len(dets) == 0:
            # Nothing to do for this observation
            continue
        comm = ob.comm.comm_group
        rank = ob.comm.group_rank
        # Make sure detector data output exists
        exists = ob.detdata.ensure(
            self.det_data, detectors=dets, create_units=self.det_data_units
        )
        sindx = ob.session.uid
        telescope = ob.telescope.uid
        focalplane = ob.telescope.focalplane
        size = ob.detdata[self.det_data][dets[0]].size
        fsampl = focalplane.sample_rate.to_value(u.Hz)

        if self.drift_mode == "linear_drift":
            # One random slope per detector.  The keyed (counter-based) RNG
            # makes each detector's draw reproducible regardless of process
            # layout.  key1 = realization * 2^32 + telescope * 2^16 + component
            key1 = (
                self.realization * 4294967296 + telescope * 65536 + self.component
            )
            counter2 = 0

            for det in dets:
                detindx = focalplane[det]["uid"]
                key2 = sindx
                counter1 = detindx

                rngdata = rng.random(
                    1,
                    sampler="gaussian",
                    key=(key1, key2),
                    counter=(counter1, counter2),
                )
                gf = 1 + rngdata[0] * self.sigma_drift
                # linear ramp from 1 at the first sample to gf at the last
                gain = (gf - 1) * np.linspace(0, 1, size) + 1

                ob.detdata[self.det_data][det] *= gain

        elif self.drift_mode == "thermal_drift":
            fmin = fsampl / (4 * size)
            # the factor of 4x the length of the sample vector is
            # to avoid circular correlations
            freq = np.logspace(np.log10(fmin), np.log10(fsampl / 2.0), 1000)
            psd = self.get_psd(freq)
            det_group = np.unique(focalplane.detector_data[self.focalplane_group])
            thermal_fluct = np.zeros(
                (len(det_group), ob.n_local_samples), dtype=np.float64
            )
            for iw, w in enumerate(det_group):
                # simulate a noise-like timestream
                thermal_fluct[iw][:] = sim_noise_timestream(
                    realization=self.realization,
                    telescope=ob.telescope.uid,
                    component=self.component,
                    sindx=sindx,
                    # we generate the same timestream for the
                    # detectors in the same group
                    detindx=iw,
                    rate=fsampl,
                    firstsamp=ob.local_index_offset,
                    samples=ob.n_local_samples,
                    freq=freq,
                    psd=psd,
                    py=False,
                )

            for det in dets:
                detindx = focalplane[det]["uid"]
                # we inject a detector mismatch in the thermal fluctuation
                # only if the mismatch != 0
                if self.detector_mismatch != 0:
                    # NOTE(review): these key constants differ from the
                    # 2^32 / 2^16 scheme used by the other branches —
                    # presumably intentional, to decorrelate the streams.
                    key1 = (
                        self.realization * 429496123345
                        + telescope * 6512345
                        + self.component
                    )
                    counter1 = detindx
                    counter2 = 0
                    key2 = sindx
                    rngdata = rng.random(
                        1,
                        sampler="gaussian",
                        key=(key1, key2),
                        counter=(counter1, counter2),
                    )
                    rngdata = 1 + rngdata[0] * self.detector_mismatch
                    thermal_factor = self.thermal_fluctuation_amplitude * rngdata
                else:
                    thermal_factor = self.thermal_fluctuation_amplitude

                # identify to which group the detector belongs
                mask = focalplane[det][self.focalplane_group] == det_group

                # assign the thermal fluct. simulated for that det. group
                # making sure that Tdrift is in the same units as Tbath
                Tdrift = (thermal_factor * thermal_fluct[mask][0]).to(
                    self.focalplane_Tbath.unit
                )
                # we make dT an array of floats (from an array of dimensionless Quantity),
                # this will avoid unit errors when multiplied to the det_data.

                dT = (Tdrift / self.focalplane_Tbath + 1).to_value()

                ob.detdata[self.det_data][det] *= self.responsivity_function(dT)

        elif self.drift_mode == "slow_drift":
            fmin = fsampl / (4 * size)
            # the factor of 4x the length of the sample vector is
            # to avoid circular correlations
            freq = np.logspace(np.log10(fmin), np.log10(fsampl / 2.0), 1000)
            # making sure that the cut-off frequency
            # is always above the observation time scale
            cutoff = np.max([self.cutoff_freq.to_value(u.Hz), fsampl / size])
            argmin = np.argmin(np.fabs(freq - cutoff))

            # zero the PSD above the cutoff so only slow modes survive
            psd = np.concatenate(
                [self.get_psd(freq[:argmin]), np.zeros_like(freq[argmin:])]
            )
            det_group = np.unique(focalplane.detector_data[self.focalplane_group])

            # if the mismatch is maximum (i.e. =1) we don't
            # inject the common mode but only an independent slow drift

            if self.detector_mismatch == 1:
                gain_common = np.zeros_like(det_group, dtype=np.float64)
            else:
                gain_common = []
                for iw, w in enumerate(det_group):
                    gain = sim_noise_timestream(
                        realization=self.realization,
                        telescope=ob.telescope.uid,
                        component=self.component,
                        sindx=sindx,
                        detindx=iw,  # drift common to all detectors
                        rate=fsampl,
                        firstsamp=ob.local_index_offset,
                        samples=ob.n_local_samples,
                        freq=freq,
                        psd=psd,
                        py=False,
                    )
                    # NOTE(review): sim_noise_timestream appears to return an
                    # aligned buffer exposing .array() and .clear() — confirm.
                    gain_common.append(np.array(gain))
                    gain.clear()
                    del gain
            gain_common = np.array(gain_common)

            for det in dets:
                detindx = focalplane[det]["uid"]
                size = ob.detdata[self.det_data][det].size

                # simulate a noise-like timestream

                gain = sim_noise_timestream(
                    realization=self.realization,
                    telescope=ob.telescope.uid,
                    component=self.component,
                    sindx=sindx,
                    detindx=detindx,
                    rate=fsampl,
                    firstsamp=ob.local_index_offset,
                    samples=ob.n_local_samples,
                    freq=freq,
                    psd=psd,
                    py=False,
                )
                # identify to which group the detector belongs
                mask = focalplane[det][self.focalplane_group] == det_group
                # mix the per-detector drift with the group common mode,
                # weighted by the mismatch parameter
                ob.detdata[self.det_data][det] *= (
                    1
                    + (self.detector_mismatch * gain.array())
                    + (1 - self.detector_mismatch) * gain_common[mask][0]
                )
                gain.clear()
                del gain

    return

_finalize(data, **kwargs)

Source code in toast/ops/sim_gaindrifts.py
301
302
def _finalize(self, data, **kwargs):
    return

_provides()

Source code in toast/ops/sim_gaindrifts.py
317
318
319
320
321
322
323
324
325
def _provides(self):
    prov = {
        "meta": list(),
        "shared": list(),
        "detdata": [
            self.det_data,
        ],
    }
    return prov

_requires()

Source code in toast/ops/sim_gaindrifts.py
304
305
306
307
308
309
310
311
312
313
314
315
def _requires(self):
    req = {
        "meta": list(),
        "shared": [
            self.boresight,
        ],
        "detdata": [self.det_data],
        "intervals": list(),
    }
    if self.view is not None:
        req["intervals"].append(self.view)
    return req

get_psd(f)

Source code in toast/ops/sim_gaindrifts.py
 97
 98
 99
100
101
def get_psd(self, f):
    """Evaluate the gain-drift power spectral density at frequencies ``f``.

    Implements ``sigma_drift**2 * (fknee_drift / f)**alpha_drift`` with the
    knee frequency expressed in Hz.
    """
    fknee_hz = self.fknee_drift.to_value(u.Hz)
    return self.sigma_drift**2 * (fknee_hz / f) ** self.alpha_drift

toast.ops.GainScrambler

Bases: Operator

Apply random gain errors to detector data.

This operator draws random gain errors from a given distribution and applies them to the specified detectors.

Source code in toast/ops/gainscrambler.py
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
@trait_docs
class GainScrambler(Operator):
    """Apply random gain errors to detector data.

    This operator draws one random gain factor per matching detector from a
    Gaussian distribution (``center``, ``sigma``) and multiplies it into the
    detector timestream.  The keyed counter-based RNG makes every draw
    reproducible for a given (realization, telescope, session, detector).
    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    det_data = Unicode(
        defaults.det_data, help="Observation detdata key to apply the gain error to"
    )

    pattern = Unicode(
        ".*",
        allow_none=True,
        help="Regex pattern to match against detector names. Only detectors that "
        "match the pattern are scrambled.",
    )
    center = Float(1, allow_none=False, help="Gain distribution center")

    sigma = Float(1e-3, allow_none=False, help="Gain distribution width")

    realization = Int(0, allow_none=False, help="Realization index")

    component = Int(0, allow_none=False, help="Component index for this simulation")

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Draw and apply one gain factor per matching detector.

        Args:
            data (toast.Data): The distributed data.
            detectors (list): Optional restriction of the detectors to use.
        """
        if self.pattern is None:
            pat = None
        else:
            pat = re.compile(self.pattern)

        for obs in data.obs:
            # Get the detectors we are using for this observation
            dets = obs.select_local_detectors(detectors)
            if len(dets) == 0:
                # Nothing to do for this observation
                continue

            sindx = obs.session.uid
            telescope = obs.telescope.uid

            focalplane = obs.telescope.focalplane

            # key1 = realization * 2^32 + telescope * 2^16 + component
            key1 = self.realization * 4294967296 + telescope * 65536 + self.component
            key2 = sindx
            counter1 = 0
            counter2 = 0

            dets_present = set(obs.detdata[self.det_data].detectors)

            for det in dets:
                # Test the detector pattern
                if pat is not None and pat.match(det) is None:
                    continue

                detindx = focalplane[det]["uid"]
                counter1 = detindx

                # Keyed RNG: the draw depends only on the key/counter, not on
                # iteration order, so results are layout-independent.
                rngdata = rng.random(
                    1,
                    sampler="gaussian",
                    key=(key1, key2),
                    counter=(counter1, counter2),
                )

                gain = self.center + rngdata[0] * self.sigma
                # Only scramble detectors that actually have data allocated
                if det in dets_present:
                    obs.detdata[self.det_data][det] *= gain

        return

    def _finalize(self, data, **kwargs):
        return

    def _requires(self):
        req = {
            "meta": list(),
            "shared": list(),
            "detdata": [self.det_data],
            "intervals": list(),
        }
        return req

    def _provides(self):
        prov = {
            "meta": list(),
            "shared": list(),
            "detdata": list(),
        }
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

center = Float(1, allow_none=False, help='Gain distribution center') class-attribute instance-attribute

component = Int(0, allow_none=False, help='Component index for this simulation') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, help='Observation detdata key to apply the gain error to') class-attribute instance-attribute

pattern = Unicode(f'.*', allow_none=True, help='Regex pattern to match against detector names. Only detectors that match the pattern are scrambled.') class-attribute instance-attribute

realization = Int(0, allow_none=False, help='Realization index') class-attribute instance-attribute

sigma = Float(0.001, allow_none=False, help='Gain distribution width') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/gainscrambler.py
47
48
def __init__(self, **kwargs):
    super().__init__(**kwargs)

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/gainscrambler.py
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    log = Logger.get()

    if self.pattern is None:
        pat = None
    else:
        pat = re.compile(self.pattern)

    for obs in data.obs:
        # Get the detectors we are using for this observation
        dets = obs.select_local_detectors(detectors)
        if len(dets) == 0:
            # Nothing to do for this observation
            continue

        comm = obs.comm.comm_group
        rank = obs.comm.group_rank

        sindx = obs.session.uid
        telescope = obs.telescope.uid

        focalplane = obs.telescope.focalplane

        # key1 = realization * 2^32 + telescope * 2^16 + component
        key1 = self.realization * 4294967296 + telescope * 65536 + self.component
        key2 = sindx
        counter1 = 0
        counter2 = 0

        dets_present = set(obs.detdata[self.det_data].detectors)

        for det in dets:
            # Test the detector pattern
            if pat is not None and pat.match(det) is None:
                continue

            detindx = focalplane[det]["uid"]
            counter1 = detindx

            rngdata = rng.random(
                1,
                sampler="gaussian",
                key=(key1, key2),
                counter=(counter1, counter2),
            )

            gain = self.center + rngdata[0] * self.sigma
            if det in dets_present:
                obs.detdata[self.det_data][det] *= gain

    return

_finalize(data, **kwargs)

Source code in toast/ops/gainscrambler.py
103
104
def _finalize(self, data, **kwargs):
    return

_provides()

Source code in toast/ops/gainscrambler.py
115
116
117
118
119
120
121
def _provides(self):
    prov = {
        "meta": list(),
        "shared": list(),
        "detdata": list(),
    }
    return prov

_requires()

Source code in toast/ops/gainscrambler.py
106
107
108
109
110
111
112
113
def _requires(self):
    req = {
        "meta": list(),
        "shared": list(),
        "detdata": [self.det_data],
        "intervals": list(),
    }
    return req

toast.ops.PerturbHWP

Bases: Operator

Operator that adds irregularities to HWP rotation

Source code in toast/ops/sim_hwp.py
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
@trait_docs
class PerturbHWP(Operator):
    """Operator that adds irregularities to HWP rotation.

    Two kinds of imperfection can be simulated and combined:

    * timing jitter on the recorded sample times (``time_sigma``), and
    * a uniform drift of the spin rate across each observation
      (``drift_sigma``).
    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    times = Unicode(defaults.times, help="Observation shared key for timestamps")

    hwp_angle = Unicode(
        defaults.hwp_angle,
        allow_none=True,
        help="Observation shared key for HWP angle",
    )

    drift_sigma = Quantity(
        None,
        allow_none=True,
        help="1-sigma relative change in spin rate, such as 0.01 / hour",
    )

    time_sigma = Quantity(
        None,
        allow_none=True,
        help="1-sigma difference between real and nominal time stamps",
    )

    realization = Int(0, allow_none=False, help="Realization index")

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Perturb the shared HWP angle of every observation in ``data``.

        Raises:
            RuntimeError: if a required trait is unset or the simulated
                timing jitter makes the time stamps non-monotonic.
            ValueError: if one or more observations have incompatible
                HWP data (all processes raise together).
        """
        log = Logger.get()

        for trait in ("times", "hwp_angle"):
            if getattr(self, trait) is None:
                msg = f"You must set the '{trait}' trait before calling exec()"
                raise RuntimeError(msg)

        all_failed = 0
        for obs in data.obs:
            # Get an RNG seed
            key1 = self.realization * 1543343 + obs.telescope.uid
            key2 = obs.session.uid
            counter1 = 0

            # The times and hwp_angle are shared among columns of the process
            # grid.  Only the first process row needs to modify the data.
            if (
                obs.shared.comm_type(self.times) != "column"
                or obs.shared.comm_type(self.hwp_angle) != "column"
            ):
                msg = f"obs {obs.name}: expected shared fields {self.times} and "
                msg += f"{self.hwp_angle} to be on the column communicator."
                log.error(msg)
                all_failed += 1
                continue

            failed = 0
            new_angle = None
            if obs.comm_col_rank == 0:
                times = obs.shared[self.times].data
                hwp_angle = obs.shared[self.hwp_angle].data

                # We are in the first process row.  In our RNG generation,
                # "counter2" corresponds to the sample index.  If there are
                # multiple processes in the grid row, start our RNG stream
                # at the first sample on this process.
                counter2 = obs.local_index_offset

                time_delta = times[-1] - times[0]

                # Simulate timing error (jitter)
                if self.time_sigma is None:
                    time_error = 0
                else:
                    component = 0
                    rngdata = rng.random(
                        times.size,
                        sampler="gaussian",
                        key=(key1, key2 + component),
                        counter=(counter1, counter2),
                    )
                    time_error = np.array(rngdata) * self.time_sigma.to_value(u.s)
                new_times = times + time_error
                if np.any(np.diff(new_times) <= 0):
                    raise RuntimeError("Simulated timing error causes time travel")

                # Simulate rate drift
                unwrapped = np.unwrap(hwp_angle)
                median_step = np.median(np.diff(unwrapped))
                if np.abs(median_step) < 1e-10:
                    # This was a stepped HWP, not continuously rotating
                    msg = f"obs {obs.name}: Don't know how to perturb a stepped HWP. "
                    msg += f"Median step size is {np.degrees(median_step)} deg"
                    log.error(msg)
                    failed += 1
                else:
                    nominal_rate = (unwrapped[-1] - unwrapped[0]) / time_delta
                    if self.drift_sigma is None:
                        begin_rate = nominal_rate
                        accel = 0
                    else:
                        # This random number is for the uniform drift across the whole
                        # observation.  All processes along the row of the grid should
                        # use the same value here.
                        counter2 = 0
                        component = 1
                        rngdata = rng.random(
                            1,
                            sampler="gaussian",
                            key=(key1, key2 + component),
                            counter=(counter1, counter2),
                        )
                        sigma = self.drift_sigma.to_value(1 / u.s) * time_delta
                        drift = rngdata[0] * sigma
                        begin_rate = nominal_rate * (1 - drift)
                        end_rate = nominal_rate * (1 + drift)
                        accel = (end_rate - begin_rate) / time_delta
                    # Now calculate the HWP angle subject to jitter and drift
                    t = new_times - new_times[0]
                    new_angle = 0.5 * accel * t**2 + begin_rate * t + hwp_angle[0]
            if obs.comm_col_size > 1:
                failed = obs.comm_col.allreduce(failed, op=MPI.SUM)
            if failed == 0:
                # Set the new HWP angle values
                obs.shared[self.hwp_angle].set(new_angle, offset=(0,), fromrank=0)
            else:
                all_failed += 1

        # All processes raise ValueError or not.
        if data.comm.comm_world is not None:
            all_failed = data.comm.comm_world.allreduce(all_failed, op=MPI.SUM)
        if all_failed > 0:
            msg = "One or more observations had incompatible HWP values"
            raise ValueError(msg)

    def _finalize(self, data, **kwargs):
        return

    def _requires(self):
        return {
            "shared": [
                self.times,
                self.hwp_angle,
            ]
        }

    def _provides(self):
        return dict()

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

drift_sigma = Quantity(None, allow_none=True, help='1-sigma relative change in spin rate, such as 0.01 / hour') class-attribute instance-attribute

hwp_angle = Unicode(defaults.hwp_angle, allow_none=True, help='Observation shared key for HWP angle') class-attribute instance-attribute

realization = Int(0, allow_none=False, help='Realization index') class-attribute instance-attribute

time_sigma = Quantity(None, allow_none=True, help='1-sigma difference between real and nominal time stamps') class-attribute instance-attribute

times = Unicode(defaults.times, help='Observation shared key for timestamps') class-attribute instance-attribute

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/sim_hwp.py
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Perturb each observation's HWP angle with timing jitter and rate drift.

    For every observation, the shared ``times`` and ``hwp_angle`` fields
    (which must live on the column communicator) are read on the first
    process row, perturbed with counter-based random numbers so results are
    reproducible for a given realization / telescope / session, and written
    back with ``shared[...].set()``.

    Args:
        data: The distributed Data container.
        detectors (list): Unused; accepted for operator API compatibility.

    Raises:
        RuntimeError: If a required trait is unset, or simulated timing
            jitter makes the timestamps non-monotonic.
        ValueError: If one or more observations had incompatible HWP values.
    """
    log = Logger.get()

    # Both shared field names must be configured before running.
    for trait in ("times", "hwp_angle"):
        if getattr(self, trait) is None:
            msg = f"You must set the '{trait}' trait before calling exec()"
            raise RuntimeError(msg)

    all_failed = 0
    for obs in data.obs:
        # Get an RNG seed unique to this realization / telescope / session.
        key1 = self.realization * 1543343 + obs.telescope.uid
        key2 = obs.session.uid
        counter1 = 0

        # The times and hwp_angle are shared among columns of the process
        # grid.  Only the first process row needs to modify the data.
        if (
            obs.shared.comm_type(self.times) != "column"
            or obs.shared.comm_type(self.hwp_angle) != "column"
        ):
            msg = f"obs {obs.name}: expected shared fields {self.times} and "
            msg += f"{self.hwp_angle} to be on the column communicator."
            log.error(msg)
            all_failed += 1
            continue

        failed = 0
        new_angle = None
        if obs.comm_col_rank == 0:
            times = obs.shared[self.times].data
            hwp_angle = obs.shared[self.hwp_angle].data

            # We are in the first process row.  In our RNG generation,
            # "counter2" corresponds to the sample index.  If there are
            # multiple processes in the grid row, start our RNG stream
            # at the first sample on this process.
            counter2 = obs.local_index_offset

            time_delta = times[-1] - times[0]

            # Simulate timing error (jitter)
            if self.time_sigma is None:
                time_error = 0
            else:
                component = 0
                rngdata = rng.random(
                    times.size,
                    sampler="gaussian",
                    key=(key1, key2 + component),
                    counter=(counter1, counter2),
                )
                time_error = np.array(rngdata) * self.time_sigma.to_value(u.s)
            new_times = times + time_error
            # Jitter must never reorder samples.
            if np.any(np.diff(new_times) <= 0):
                raise RuntimeError("Simulated timing error causes time travel")

            # Simulate rate drift
            unwrapped = np.unwrap(hwp_angle)
            median_step = np.median(np.diff(unwrapped))
            if np.abs(median_step) < 1e-10:
                # This was a stepped HWP, not continuously rotating.
                # (Fixed typo in the original message: "now" -> "how".)
                msg = f"obs {obs.name}: Don't know how to perturb a stepped HWP. "
                msg += f"Median step size is {np.degrees(median_step)} deg"
                log.error(msg)
                failed += 1
            else:
                nominal_rate = (unwrapped[-1] - unwrapped[0]) / time_delta
                if self.drift_sigma is None:
                    begin_rate = nominal_rate
                    accel = 0
                else:
                    # This random number is for the uniform drift across the whole
                    # observation.  All processes along the row of the grid should
                    # use the same value here.
                    counter2 = 0
                    component = 1
                    rngdata = rng.random(
                        1,
                        sampler="gaussian",
                        key=(key1, key2 + component),
                        counter=(counter1, counter2),
                    )
                    sigma = self.drift_sigma.to_value(1 / u.s) * time_delta
                    drift = rngdata[0] * sigma
                    begin_rate = nominal_rate * (1 - drift)
                    end_rate = nominal_rate * (1 + drift)
                    accel = (end_rate - begin_rate) / time_delta
                # Now calculate the HWP angle subject to jitter and drift
                t = new_times - new_times[0]
                new_angle = 0.5 * accel * t**2 + begin_rate * t + hwp_angle[0]
        if obs.comm_col_size > 1:
            failed = obs.comm_col.allreduce(failed, op=MPI.SUM)
        if failed == 0:
            # Set the new HWP angle values
            obs.shared[self.hwp_angle].set(new_angle, offset=(0,), fromrank=0)
        else:
            all_failed += 1

    # All processes raise ValueError or not.
    if data.comm.comm_world is not None:
        all_failed = data.comm.comm_world.allreduce(all_failed, op=MPI.SUM)
    if all_failed > 0:
        msg = "One or more observations had incompatible HWP values"
        raise ValueError(msg)

_finalize(data, **kwargs)

Source code in toast/ops/sim_hwp.py
270
271
def _finalize(self, data, **kwargs):
    return

_provides()

Source code in toast/ops/sim_hwp.py
281
282
def _provides(self):
    return dict()

_requires()

Source code in toast/ops/sim_hwp.py
273
274
275
276
277
278
279
def _requires(self):
    return {
        "shared": [
            self.times,
            self.hwp_angle,
        ]
    }

toast.ops.CrossTalk

Bases: Operator

Simulate readout crosstalk between channels.

  1. The cross talk matrix can just be a dictionary of dictionaries of values (i.e. a sparse matrix) on every process. It does not need to be a dense matrix loaded from an HDF5 file. The calling code can create this however it likes.

  2. Each process has a DetectorData object representing the local data for some detectors and some timespan (e.g. obs.detdata["signal"]). It can make a copy of this and pass it to the next rank in the grid column. Each process receives a copy from the previous process in the column, accumulates to its local detectors, and passes it along. This continues until every process has accumulated the data from the other processes in the column.

Source code in toast/ops/sim_crosstalk.py
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
@trait_docs
class CrossTalk(Operator):
    """Simulate readout crosstalk between channels.

    1.  The cross talk matrix can just be a dictionary of
    dictionaries of values (i.e. a sparse matrix) on every process.
    It does not need to be a dense matrix loaded from an HDF5 file.
    The calling code can create this however it likes.

    2. Each process has a DetectorData object representing the local data for some
    detectors and some timespan (e.g. obs.detdata["signal"]).
    It can make a copy of this and pass it to the next rank in the grid column.
    Each process receives a copy from the previous process in the column,
    accumulates to its local detectors, and passes it along.
    This continues until every process has accumulated the data
    from the other processes in the column.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    view = Unicode(
        None, allow_none=True, help="Use this view of the data in all observations"
    )

    det_data = Unicode(
        defaults.det_data,
        allow_none=True,
        help="Observation detdata key for the timestream data",
    )

    det_data_units = Unit(
        defaults.det_data_units, help="Output units if creating detector data"
    )

    xtalk_mat_file = Unicode(
        None, allow_none=True, help="CrossTalk matrix dictionary of dictionaries"
    )

    detector_ordering = Unicode(
        "random",
        help="Initialize Crosstalk matrix with detector ordering: `random, gap,constant` default `random` ",
    )

    realization = Int(0, help="integer to set a different random seed ")

    def __init__(self, **kwargs):
        # All configuration arrives through traits; just forward to Operator.
        super().__init__(**kwargs)

    @function_timer
    def exec_roundrobin(self, data, detectors=None, **kwargs):
        """Apply crosstalk by exchanging detector data between processes.

        Alternative to _exec() that keeps the detector distribution fixed and
        circulates detector name lists and timestreams with every other
        process in the group using point-to-point messages.

        Debug ``print`` calls from the original implementation have been
        replaced with ``log.debug`` so library code does not write to stdout.
        """
        log = Logger.get()
        if self.xtalk_mat_file is None:
            self.xtalk_mat = init_xtalk_matrix(data, realization=self.realization)

        for ob in data.obs:
            # Get the detectors we are using for this observation
            dets = ob.select_local_detectors(detectors)
            if len(dets) == 0:
                continue
            comm = ob.comm.comm_group
            rank = ob.comm.group_rank
            ob.detdata.ensure(
                self.det_data, detectors=dets, create_units=self.det_data_units
            )
            # We loop over all the procs except our own rank.
            procs = np.arange(ob.comm.group_size)
            procs = procs[procs != rank]
            tmp = np.zeros_like(ob.detdata[self.det_data].data)
            for idet, det in enumerate(dets):
                xtalklist = list(self.xtalk_mat[det].keys())
                # First crosstalk the detectors that are already local.
                intersect_local = np.intersect1d(
                    ob.detdata[self.det_data].detectors, xtalklist
                )
                ind1 = [xtalklist.index(k) for k in intersect_local]
                ind2 = [
                    ob.detdata[self.det_data].detectors.index(k)
                    for k in intersect_local
                ]
                xtalk_weights = np.array(
                    [self.xtalk_mat[det][kk] for kk in np.array(xtalklist)[ind1]]
                )
                tmp[idet] += np.dot(
                    xtalk_weights, ob.detdata[self.det_data].data[ind2, :]
                )
                for ip in procs:
                    # Exchange detector name lists with process ip.
                    comm.isend(
                        ob.detdata[self.det_data].detectors, dest=ip, tag=rank * 10 + ip
                    )
                    req = comm.irecv(source=ip, tag=ip * 10 + rank)
                    detlist = req.wait()
                    log.debug(f"rank {rank} received detlist {detlist} from {ip}")
                    intersect = list(set(detlist).intersection(set(xtalklist)))
                    log.debug(f"rank {rank} / {ip} intersection: {intersect}")
                    # Only communicate samples if some of the detectors sent
                    # by this rank actually crosstalk with det.
                    if len(intersect) == 0:
                        continue

                    # Indices of the crosstalk coefficients and of detdata.
                    ind1 = [xtalklist.index(k) for k in intersect]
                    ind2 = [detlist.index(k) for k in intersect]
                    xtalk_weights = np.array(
                        [self.xtalk_mat[det][kk] for kk in np.array(xtalklist)[ind1]]
                    )

                    # Exchange the actual timestreams and accumulate.
                    # NOTE(review): the isend requests are never waited on;
                    # the trailing Barrier hides this, but it relies on the
                    # MPI implementation buffering the messages -- confirm.
                    comm.isend(
                        ob.detdata[self.det_data].data, dest=ip, tag=rank * 10 + ip
                    )
                    req = comm.irecv(source=ip, tag=ip * 10 + rank)
                    detdata = req.wait()
                    tmp[idet] += np.dot(xtalk_weights, detdata[ind2])

            # Before updating detdata samples, make sure that all the
            # send/receive operations have been performed.
            comm.Barrier()
            for idet, det in enumerate(dets):
                ob.detdata[self.det_data][det] += tmp[idet]

        return

    @function_timer
    def _exec(self, data, **kwargs):
        """Apply the crosstalk matrix to the detector timestreams.

        Data are temporarily redistributed so that each process holds all
        detectors for a slice of samples, mixed using the (sparse) crosstalk
        matrix, and then redistributed back.
        """
        # Read the crosstalk matrix from file, or initialize it randomly.
        if self.xtalk_mat_file is None:
            self.xtalk_mat = init_xtalk_matrix(data, realization=self.realization)
        else:
            self.xtalk_mat = read_xtalk_matrix(self.xtalk_mat_file, data)

        for ob in data.obs:
            # Detector data are usually distributed by detector.  For
            # crosstalk it is more convenient to redistribute by time, so
            # that each process has the samples from all detectors at a
            # given time stamp.
            if ob.comm.group_size > 1:
                old_data_shape = ob.detdata[self.det_data].data.shape
                ob.redistribute(1, times=ob.shared["times"])
                # Now ob.local_detectors == ob.all_detectors and
                # the number of local samples is some small slice of the total
                new_data_shape = ob.detdata[self.det_data].data.shape
                assert old_data_shape != new_data_shape
                assert new_data_shape[0] == len(ob.all_detectors)

            # Accumulate the crosstalked data into a temporary array.
            tmp = np.zeros_like(ob.detdata[self.det_data].data)
            for idet, det in enumerate(ob.all_detectors):
                # For a given detector, only the subset of detectors listed
                # in its matrix row can be crosstalking.
                xtalklist = list(self.xtalk_mat[det].keys())
                intersect_local = np.intersect1d(ob.all_detectors, xtalklist)
                ind1 = [xtalklist.index(k) for k in intersect_local]
                ind2 = [ob.all_detectors.index(k) for k in intersect_local]
                xtalk_weights = np.array(
                    [self.xtalk_mat[det][kk] for kk in np.array(xtalklist)[ind1]]
                )
                tmp[idet] += np.dot(
                    xtalk_weights, ob.detdata[self.det_data].data[ind2, :]
                )

            for idet, det in enumerate(ob.all_detectors):
                ob.detdata[self.det_data][det] += tmp[idet]

            # Restore the previous data distribution.
            if ob.comm.group_size > 1:
                ob.redistribute(ob.comm.group_size, times=ob.shared["times"])

        return

    def _finalize(self, data, **kwargs):
        """No finalization is needed."""
        return

    def _requires(self):
        """Return the data objects required by this operator.

        NOTE: the original listed ``self.boresight`` under "shared", but this
        class defines no ``boresight`` trait, so calling the method raised
        AttributeError.  Crosstalk only needs the detector timestreams.
        """
        req = {
            "meta": list(),
            "shared": list(),
            "detdata": [self.det_data],
            "intervals": list(),
        }
        if self.view is not None:
            req["intervals"].append(self.view)
        return req

    def _provides(self):
        """Report the detector data modified in place by this operator."""
        prov = {
            "meta": list(),
            "shared": list(),
            "detdata": [
                self.det_data,
            ],
        }
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

det_data = Unicode(defaults.det_data, allow_none=True, help='Observation detdata key for the timestream data') class-attribute instance-attribute

det_data_units = Unit(defaults.det_data_units, help='Output units if creating detector data') class-attribute instance-attribute

detector_ordering = Unicode('random', help='Initialize Crosstalk matrix with detector ordering: `random, gap,constant` default `random` ') class-attribute instance-attribute

realization = Int(0, help='integer to set a different random seed ') class-attribute instance-attribute

view = Unicode(None, allow_none=True, help='Use this view of the data in all observations') class-attribute instance-attribute

xtalk_mat_file = Unicode(None, allow_none=True, help='CrossTalk matrix dictionary of dictionaries') class-attribute instance-attribute

__init__(**kwargs)

Source code in toast/ops/sim_crosstalk.py
174
175
def __init__(self, **kwargs):
    # All configuration arrives through traits; forward everything to the base class.
    super().__init__(**kwargs)

_exec(data, **kwargs)

Source code in toast/ops/sim_crosstalk.py
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
@function_timer
def _exec(self, data, **kwargs):
    env = Environment.get()
    log = Logger.get()
    ## Read the XTalk matrix from file or initialize it randomly

    if self.xtalk_mat_file is None:
        self.xtalk_mat = init_xtalk_matrix(data, realization=self.realization)
    else:
        self.xtalk_mat = read_xtalk_matrix(self.xtalk_mat_file, data)

    for ob in data.obs:
        # Get the detectors we are using for this observation
        comm = ob.comm.comm_group
        rank = ob.comm.group_rank
        # Detdata are usually distributed by detectors,
        # to crosstalk is more convenient to  redistribute them by time,
        # so that   each process has the samples from all detectors at a given
        # time stamp
        if ob.comm.group_size > 1:
            old_data_shape = ob.detdata[self.det_data].data.shape
            ob.redistribute(1, times=ob.shared["times"])
            # Now ob.local_detectors == ob.all_detectors and
            # the number of local samples is some small slice of the total
            new_data_shape = ob.detdata[self.det_data].data.shape
            assert old_data_shape != new_data_shape
            assert new_data_shape[0] == len(ob.all_detectors)

        # we store the crosstalked data into a temporary array
        tmp = np.zeros_like(ob.detdata[self.det_data].data)
        for idet, det in enumerate(ob.all_detectors):
            # for a given detector we assume that only a subset
            # of detectors can be crosstalking
            xtalklist = list(self.xtalk_mat[det].keys())
            intersect_local = np.intersect1d(ob.all_detectors, xtalklist)
            ind1 = [xtalklist.index(k) for k in intersect_local]
            # ind2 = [ ob.detdata[self.det_data].detectors .index(k)  for  k in intersect_local]
            ind2 = [ob.all_detectors.index(k) for k in intersect_local]
            xtalk_weights = np.array(
                [self.xtalk_mat[det][kk] for kk in np.array(xtalklist)[ind1]]
            )
            tmp[idet] += np.dot(
                xtalk_weights, ob.detdata[self.det_data].data[ind2, :]
            )

        for idet, det in enumerate(ob.all_detectors):
            ob.detdata[self.det_data][det] += tmp[idet]

        # We distribute the data back to the previous distribution
        if ob.comm.group_size > 1:
            ob.redistribute(ob.comm.group_size, times=ob.shared["times"])

    return

_finalize(data, **kwargs)

Source code in toast/ops/sim_crosstalk.py
323
324
def _finalize(self, data, **kwargs):
    return

_provides()

Source code in toast/ops/sim_crosstalk.py
339
340
341
342
343
344
345
346
347
def _provides(self):
    prov = {
        "meta": list(),
        "shared": list(),
        "detdata": [
            self.det_data,
        ],
    }
    return prov

_requires()

Source code in toast/ops/sim_crosstalk.py
326
327
328
329
330
331
332
333
334
335
336
337
def _requires(self):
    req = {
        "meta": list(),
        "shared": [
            self.boresight,
        ],
        "detdata": [self.det_data],
        "intervals": list(),
    }
    if self.view is not None:
        req["intervals"].append(self.view)
    return req

exec_roundrobin(data, detectors=None, **kwargs)

Source code in toast/ops/sim_crosstalk.py
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
@function_timer
def exec_roundrobin(self, data, detectors=None, **kwargs):
    """Apply crosstalk by exchanging detector data between processes.

    Alternative to _exec() that keeps the detector distribution fixed and
    circulates detector name lists and timestreams with every other process
    in the group using point-to-point MPI messages.
    """
    env = Environment.get()
    log = Logger.get()
    if self.xtalk_mat_file is None:
        self.xtalk_mat = init_xtalk_matrix(data, realization=self.realization)

    for ob in data.obs:
        # Get the detectors we are using for this observation
        dets = ob.select_local_detectors(detectors)
        Ndets = len(dets)
        if Ndets == 0:
            continue
        comm = ob.comm.comm_group
        rank = ob.comm.group_rank
        exists = ob.detdata.ensure(
            self.det_data, detectors=dets, create_units=self.det_data_units
        )
        telescope = ob.telescope.uid
        focalplane = ob.telescope.focalplane
        # we loop over all the procs except rank

        procs = np.arange(ob.comm.group_size)
        procs = procs[procs != rank]
        tmp = np.zeros_like(ob.detdata[self.det_data].data)
        for idet, det in enumerate(dets):
            xtalklist = list(self.xtalk_mat[det].keys())
            # First crosstalk the detectors that are already local to this rank.
            intersect_local = np.intersect1d(
                ob.detdata[self.det_data].detectors, xtalklist
            )
            ind1 = [xtalklist.index(k) for k in intersect_local]
            ind2 = [
                ob.detdata[self.det_data].detectors.index(k)
                for k in intersect_local
            ]

            xtalk_weights = np.array(
                [self.xtalk_mat[det][kk] for kk in np.array(xtalklist)[ind1]]
            )

            tmp[idet] += np.dot(
                xtalk_weights, ob.detdata[self.det_data].data[ind2, :]
            )
            # assert  old.var() !=   ob.detdata[self.det_data][det] .var()
            for ip in procs:
                # Exchange detector name lists with process ip.

                comm.isend(
                    ob.detdata[self.det_data].detectors, dest=ip, tag=rank * 10 + ip
                )
                req = comm.irecv(source=ip, tag=ip * 10 + rank)
                detlist = req.wait()
                # NOTE(review): these prints look like leftover debug output.
                print(detlist, "detlist", rank, ip)
                intersect = list(set(detlist).intersection(set(xtalklist)))
                print(intersect, "intersect", rank, ip)
                # intersect = np.intersect1d( detlist,xtalklist)
                ## we make sure that we communicate the samples
                # ONLY in case some of the  detectors sent by a rank  xtalking with det
                # if intersect.size ==0: continue
                if len(intersect) == 0:
                    continue

                # define the indices of Xtalk coefficients and of detdata

                ind1 = [xtalklist.index(k) for k in intersect]
                ind2 = [detlist.index(k) for k in intersect]
                xtalk_weights = np.array(
                    [self.xtalk_mat[det][kk] for kk in np.array(xtalklist)[ind1]]
                )

                # Exchange the actual timestreams and accumulate.

                comm.isend(
                    ob.detdata[self.det_data].data, dest=ip, tag=rank * 10 + ip
                )
                # buf = bytearray(1<<20) # 1 MB buffer, make it larger if needed.
                # req= comm.irecv( buf=buf,source=ip ,
                req = comm.irecv(source=ip, tag=ip * 10 + rank)
                #                    success,detdata = req.test()
                detdata = req.wait()

                tmp[idet] += np.dot(xtalk_weights, detdata[ind2])

        # before updating detdata samples we make sure
        # that all the send/receive have been performed
        comm.Barrier()
        for idet, det in enumerate(dets):
            ob.detdata[self.det_data][det] += tmp[idet]

    return

toast.ops.YieldCut

Bases: Operator

Operator that simulates non-perfect yield.

When TES detectors have their bias tuned, not all detectors have sufficient responsivity to be useful for science. This can be a temporary problem. This operator simulates a random loss in detector yield.

The det_mask trait is used to select incoming "good" detectors. This selection of good detectors then has the yield cut applied.

Source code in toast/ops/yield_cut.py
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
@trait_docs
class YieldCut(Operator):
    """Operator that simulates non-perfect yield.

    When TES detectors have their bias tuned, not all detectors have sufficient
    responsivity to be useful for science.  This can be a temporary problem.  This
    operator simulates a random loss in detector yield.

    The `det_mask` trait is used to select incoming "good" detectors.  This selection
    of good detectors then has the yield cut applied.

    """

    # Class traits

    API = Int(0, help="Internal interface version for this operator")

    det_mask = Int(
        defaults.det_mask_invalid,
        help="Bit mask value for input per-detector flagging",
    )

    det_flags = Unicode(
        defaults.det_flags,
        allow_none=True,
        help="Observation detdata key for flags to use",
    )

    det_flag_mask = Int(
        defaults.det_mask_invalid, help="Bit mask value for flagging cut detectors"
    )

    keep_frac = Float(0.9, help="Fraction of detectors to keep")

    focalplane_key = Unicode(
        "pixel",
        help="Which focalplane key to use for randomization.  "
        "Detectors that share the key value are flagged together",
    )

    fixed = Bool(
        False,
        help="If True, detector cuts do not change between observations "
        "and realizations",
    )

    realization = Int(0, help="The realization index")

    @traitlets.validate("det_mask")
    def _check_det_mask(self, proposal):
        # Bit masks must be non-negative.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Det mask should be a positive integer")
        return check

    @traitlets.validate("det_flag_mask")
    def _check_det_flag_mask(self, proposal):
        # Bit masks must be non-negative.
        check = proposal["value"]
        if check < 0:
            raise traitlets.TraitError("Flag mask should be a positive integer")
        return check

    @function_timer
    def _exec(self, data, detectors=None, **kwargs):
        """Randomly flag a fraction of detectors as cut.

        The draw is reproducible: the RNG key / counter are built from the
        telescope UID, realization, session UID and per-detector (or
        per-focalplane-group) UID, so a detector's fate is deterministic for
        a given configuration.  (Unused locals ``t0``, ``env`` and ``log``
        from the original were removed.)
        """
        for obs in data.obs:
            focalplane = obs.telescope.focalplane
            dets = obs.select_local_detectors(detectors, flagmask=self.det_mask)

            # For reproducibility, generate the random cut across all detectors
            # in the observation.  This means that a given process might have more
            # or less detectors cut than the target fraction, but the overall
            # value should be close for a large enough number of detectors.

            obs.detdata.ensure(self.det_flags, dtype=np.uint8, detectors=dets)
            new_flags = dict()
            for det in dets:
                key1 = obs.telescope.uid
                if self.fixed:
                    # Same cut for every observation and realization.
                    key2 = 0
                    counter1 = 0
                else:
                    key2 = self.realization
                    counter1 = obs.session.uid
                if self.focalplane_key is not None:
                    # Detectors sharing this focalplane value are cut together.
                    value = focalplane[det][self.focalplane_key]
                    counter2 = name_UID(value)
                else:
                    counter2 = focalplane[det]["UID"]
                x = rng.random(
                    1,
                    sampler="uniform_01",
                    key=(key1, key2),
                    counter=(counter1, counter2),
                )[0]
                if x > self.keep_frac:
                    obs.detdata[self.det_flags][det] |= self.det_flag_mask
                    new_flags[det] = self.det_flag_mask
            obs.update_local_detector_flags(new_flags)
        return

    def _finalize(self, data, **kwargs):
        """No finalization is needed."""
        return

    def _requires(self):
        """Return the detector flag field read and updated by this operator."""
        req = {
            "meta": list(),
            "shared": list(),
            "detdata": [self.det_flags],
        }
        return req

    def _provides(self):
        """This operator only updates existing flags; it creates nothing new."""
        prov = {
            "meta": list(),
            "shared": list(),
            "detdata": list(),
        }
        return prov

API = Int(0, help='Internal interface version for this operator') class-attribute instance-attribute

det_flag_mask = Int(defaults.det_mask_invalid, help='Bit mask value for flagging cut detectors') class-attribute instance-attribute

det_flags = Unicode(defaults.det_flags, allow_none=True, help='Observation detdata key for flags to use') class-attribute instance-attribute

det_mask = Int(defaults.det_mask_invalid, help='Bit mask value for input per-detector flagging') class-attribute instance-attribute

fixed = Bool(False, help='If True, detector cuts do not change between observations and realizations') class-attribute instance-attribute

focalplane_key = Unicode('pixel', help='Which focalplane key to use for randomization. Detectors that share the key value are flagged together') class-attribute instance-attribute

keep_frac = Float(0.9, help='Fraction of detectors to keep') class-attribute instance-attribute

realization = Int(0, help='The realization index') class-attribute instance-attribute

_check_det_flag_mask(proposal)

Source code in toast/ops/yield_cut.py
78
79
80
81
82
83
@traitlets.validate("det_flag_mask")
def _check_det_flag_mask(self, proposal):
    """Reject negative values proposed for the det_flag_mask trait."""
    value = proposal["value"]
    if value < 0:
        raise traitlets.TraitError("Flag mask should be a positive integer")
    return value

_check_det_mask(proposal)

Source code in toast/ops/yield_cut.py
71
72
73
74
75
76
@traitlets.validate("det_mask")
def _check_det_mask(self, proposal):
    """Reject negative values proposed for the det_mask trait."""
    value = proposal["value"]
    if value < 0:
        raise traitlets.TraitError("Det mask should be a positive integer")
    return value

_exec(data, detectors=None, **kwargs)

Source code in toast/ops/yield_cut.py
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
@function_timer
def _exec(self, data, detectors=None, **kwargs):
    """Randomly flag a fraction of detectors as cut.

    The draw is reproducible: the RNG key / counter are built from the
    telescope UID, realization, session UID and per-detector (or
    per-focalplane-group) UID, so a detector's fate is deterministic for a
    given configuration.  (Unused locals ``t0``, ``env`` and ``log`` from
    the original were removed.)
    """
    for obs in data.obs:
        focalplane = obs.telescope.focalplane
        dets = obs.select_local_detectors(detectors, flagmask=self.det_mask)

        # For reproducibility, generate the random cut across all detectors
        # in the observation.  This means that a given process might have more
        # or less detectors cut than the target fraction, but the overall
        # value should be close for a large enough number of detectors.

        obs.detdata.ensure(self.det_flags, dtype=np.uint8, detectors=dets)
        new_flags = dict()
        for det in dets:
            key1 = obs.telescope.uid
            if self.fixed:
                # Same cut for every observation and realization.
                key2 = 0
                counter1 = 0
            else:
                key2 = self.realization
                counter1 = obs.session.uid
            if self.focalplane_key is not None:
                # Detectors sharing this focalplane value are cut together.
                value = focalplane[det][self.focalplane_key]
                counter2 = name_UID(value)
            else:
                counter2 = focalplane[det]["UID"]
            x = rng.random(
                1,
                sampler="uniform_01",
                key=(key1, key2),
                counter=(counter1, counter2),
            )[0]
            if x > self.keep_frac:
                obs.detdata[self.det_flags][det] |= self.det_flag_mask
                new_flags[det] = self.det_flag_mask
        obs.update_local_detector_flags(new_flags)
    return

_finalize(data, **kwargs)

Source code in toast/ops/yield_cut.py
127
128
def _finalize(self, data, **kwargs):
    return

_provides()

Source code in toast/ops/yield_cut.py
138
139
140
141
142
143
144
def _provides(self):
    prov = {
        "meta": list(),
        "shared": list(),
        "detdata": list(),
    }
    return prov

_requires()

Source code in toast/ops/yield_cut.py
130
131
132
133
134
135
136
def _requires(self):
    req = {
        "meta": list(),
        "shared": list(),
        "detdata": [self.det_flags],
    }
    return req